Flutter Engine
GrMtlGpu Class Reference

#include <GrMtlGpu.h>

Inheritance diagram for GrMtlGpu:
GrGpu

Public Types

enum  SyncQueue { kForce_SyncQueue , kSkip_SyncQueue }
 
- Public Types inherited from GrGpu
enum class  DisconnectType { kAbandon , kCleanup }
 

Public Member Functions

 ~GrMtlGpu () override
 
void disconnect (DisconnectType) override
 
GrThreadSafePipelineBuilder * pipelineBuilder () override
 
sk_sp< GrThreadSafePipelineBuilder > refPipelineBuilder () override
 
const GrMtlCaps & mtlCaps () const
 
id< MTLDevice > device () const
 
GrMtlResourceProvider & resourceProvider ()
 
GrStagingBufferManager * stagingBufferManager () override
 
GrMtlCommandBuffer * commandBuffer ()
 
void deleteBackendTexture (const GrBackendTexture &) override
 
bool compile (const GrProgramDesc &, const GrProgramInfo &) override
 
bool precompileShader (const SkData &key, const SkData &data) override
 
void copySurfaceAsResolve (GrSurface *dst, GrSurface *src)
 
void copySurfaceAsBlit (GrSurface *dst, GrSurface *src, GrMtlAttachment *dstAttachment, GrMtlAttachment *srcAttachment, const SkIRect &srcRect, const SkIPoint &dstPoint)
 
bool onCopySurface (GrSurface *dst, const SkIRect &dstRect, GrSurface *src, const SkIRect &srcRect, GrSamplerState::Filter) override
 
void submit (GrOpsRenderPass *renderPass) override
 
std::unique_ptr< GrSemaphore > makeSemaphore (bool isOwned) override
 
std::unique_ptr< GrSemaphore > wrapBackendSemaphore (const GrBackendSemaphore &, GrSemaphoreWrapType, GrWrapOwnership) override
 
void insertSemaphore (GrSemaphore *semaphore) override
 
void waitSemaphore (GrSemaphore *semaphore) override
 
void checkFinishProcs () override
 
void finishOutstandingGpuWork () override
 
std::unique_ptr< GrSemaphore > prepareTextureForCrossContextUsage (GrTexture *) override
 
GrMtlRenderCommandEncoder * loadMSAAFromResolve (GrAttachment *dst, GrMtlAttachment *src, const SkIRect &srcRect, MTLRenderPassStencilAttachmentDescriptor *)
 
void submitIndirectCommandBuffer (GrSurface *surface, GrSurfaceOrigin origin, const SkIRect *bounds)
 
GrRingBuffer * uniformsRingBuffer () override
 
- Public Member Functions inherited from GrGpu
 GrGpu (GrDirectContext *direct)
 
virtual ~GrGpu ()
 
GrDirectContext * getContext ()
 
const GrDirectContext * getContext () const
 
const GrCaps * caps () const
 
sk_sp< const GrCaps > refCaps () const
 
virtual GrStagingBufferManager * stagingBufferManager ()
 
virtual GrRingBuffer * uniformsRingBuffer ()
 
virtual void disconnect (DisconnectType)
 
virtual GrThreadSafePipelineBuilder * pipelineBuilder ()=0
 
virtual sk_sp< GrThreadSafePipelineBuilder > refPipelineBuilder ()=0
 
virtual bool isDeviceLost () const
 
void markContextDirty (uint32_t state=kAll_GrBackendState)
 
sk_sp< GrTexture > createTexture (SkISize dimensions, const GrBackendFormat &format, GrTextureType textureType, GrRenderable renderable, int renderTargetSampleCnt, skgpu::Budgeted budgeted, GrProtected isProtected, GrColorType textureColorType, GrColorType srcColorType, const GrMipLevel texels[], int texelLevelCount, std::string_view label)
 
sk_sp< GrTexture > createTexture (SkISize dimensions, const GrBackendFormat &format, GrTextureType textureType, GrRenderable renderable, int renderTargetSampleCnt, skgpu::Mipmapped mipmapped, skgpu::Budgeted budgeted, GrProtected isProtected, std::string_view label)
 
sk_sp< GrTexture > createCompressedTexture (SkISize dimensions, const GrBackendFormat &format, skgpu::Budgeted budgeted, skgpu::Mipmapped mipmapped, GrProtected isProtected, const void *data, size_t dataSize)
 
sk_sp< GrTexture > wrapBackendTexture (const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable, GrIOType)
 
sk_sp< GrTexture > wrapCompressedBackendTexture (const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable)
 
sk_sp< GrTexture > wrapRenderableBackendTexture (const GrBackendTexture &, int sampleCnt, GrWrapOwnership, GrWrapCacheable)
 
sk_sp< GrRenderTarget > wrapBackendRenderTarget (const GrBackendRenderTarget &)
 
sk_sp< GrRenderTarget > wrapVulkanSecondaryCBAsRenderTarget (const SkImageInfo &, const GrVkDrawableInfo &)
 
sk_sp< GrGpuBuffer > createBuffer (size_t size, GrGpuBufferType intendedType, GrAccessPattern accessPattern)
 
void resolveRenderTarget (GrRenderTarget *, const SkIRect &resolveRect)
 
bool regenerateMipMapLevels (GrTexture *)
 
void resetTextureBindings ()
 
bool readPixels (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType dstColorType, void *buffer, size_t rowBytes)
 
bool writePixels (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount, bool prepForTexSampling=false)
 
bool writePixels (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType srcColorType, const void *buffer, size_t rowBytes, bool prepForTexSampling=false)
 
bool transferFromBufferToBuffer (sk_sp< GrGpuBuffer > src, size_t srcOffset, sk_sp< GrGpuBuffer > dst, size_t dstOffset, size_t size)
 
bool transferPixelsTo (GrTexture *texture, SkIRect rect, GrColorType textureColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer > transferBuffer, size_t offset, size_t rowBytes)
 
bool transferPixelsFrom (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer > transferBuffer, size_t offset)
 
bool copySurface (GrSurface *dst, const SkIRect &dstRect, GrSurface *src, const SkIRect &srcRect, GrSamplerState::Filter filter)
 
GrOpsRenderPass * getOpsRenderPass (GrRenderTarget *renderTarget, bool useMSAASurface, GrAttachment *stencil, GrSurfaceOrigin, const SkIRect &bounds, const GrOpsRenderPass::LoadAndStoreInfo &, const GrOpsRenderPass::StencilLoadAndStoreInfo &, const skia_private::TArray< GrSurfaceProxy *, true > &sampledProxies, GrXferBarrierFlags renderPassXferBarriers)
 
void executeFlushInfo (SkSpan< GrSurfaceProxy * >, SkSurfaces::BackendSurfaceAccess access, const GrFlushInfo &, const skgpu::MutableTextureState *newState)
 
virtual void willExecute ()
 
bool submitToGpu (GrSyncCpu sync)
 
virtual void submit (GrOpsRenderPass *)=0
 
virtual std::unique_ptr< GrSemaphore > makeSemaphore (bool isOwned=true)=0
 
virtual std::unique_ptr< GrSemaphore > wrapBackendSemaphore (const GrBackendSemaphore &, GrSemaphoreWrapType, GrWrapOwnership)=0
 
virtual void insertSemaphore (GrSemaphore *semaphore)=0
 
virtual void waitSemaphore (GrSemaphore *semaphore)=0
 
virtual void addFinishedProc (GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext)=0
 
virtual void checkFinishProcs ()=0
 
virtual void finishOutstandingGpuWork ()=0
 
virtual void takeOwnershipOfBuffer (sk_sp< GrGpuBuffer >)
 
bool checkAndResetOOMed ()
 
virtual std::unique_ptr< GrSemaphore > prepareTextureForCrossContextUsage (GrTexture *)=0
 
virtual void releaseUnlockedBackendObjects ()
 
Stats * stats ()
 
void dumpJSON (SkJSONWriter *) const
 
GrBackendTexture createBackendTexture (SkISize dimensions, const GrBackendFormat &, GrRenderable, skgpu::Mipmapped, GrProtected, std::string_view label)
 
bool clearBackendTexture (const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, std::array< float, 4 > color)
 
GrBackendTexture createCompressedBackendTexture (SkISize dimensions, const GrBackendFormat &, skgpu::Mipmapped, GrProtected)
 
bool updateCompressedBackendTexture (const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, const void *data, size_t length)
 
virtual bool setBackendTextureState (const GrBackendTexture &, const skgpu::MutableTextureState &, skgpu::MutableTextureState *previousState, sk_sp< skgpu::RefCntedCallback > finishedCallback)
 
virtual bool setBackendRenderTargetState (const GrBackendRenderTarget &, const skgpu::MutableTextureState &, skgpu::MutableTextureState *previousState, sk_sp< skgpu::RefCntedCallback > finishedCallback)
 
virtual void deleteBackendTexture (const GrBackendTexture &)=0
 
virtual bool compile (const GrProgramDesc &, const GrProgramInfo &)=0
 
virtual bool precompileShader (const SkData &key, const SkData &data)
 
virtual sk_sp< GrAttachment > makeStencilAttachment (const GrBackendFormat &colorFormat, SkISize dimensions, int numStencilSamples)=0
 
virtual GrBackendFormat getPreferredStencilFormat (const GrBackendFormat &)=0
 
virtual sk_sp< GrAttachment > makeMSAAAttachment (SkISize dimensions, const GrBackendFormat &format, int numSamples, GrProtected isProtected, GrMemoryless isMemoryless)=0
 
void handleDirtyContext ()
 
virtual void storeVkPipelineCacheData ()
 
virtual void xferBarrier (GrRenderTarget *, GrXferBarrierType)=0
 

Static Public Member Functions

static std::unique_ptr< GrGpu > Make (const GrMtlBackendContext &, const GrContextOptions &, GrDirectContext *)
 

Additional Inherited Members

- Protected Member Functions inherited from GrGpu
void didWriteToSurface (GrSurface *surface, GrSurfaceOrigin origin, const SkIRect *bounds, uint32_t mipLevels=1) const
 
void setOOMed ()
 
void initCaps (sk_sp< const GrCaps > caps)
 
- Static Protected Member Functions inherited from GrGpu
static bool CompressedDataIsCorrect (SkISize dimensions, SkTextureCompressionType, skgpu::Mipmapped, const void *data, size_t length)
 
- Protected Attributes inherited from GrGpu
Stats fStats
 

Detailed Description

Definition at line 35 of file GrMtlGpu.h.

Member Enumeration Documentation

◆ SyncQueue

Enumerator
kForce_SyncQueue 
kSkip_SyncQueue 

Definition at line 57 of file GrMtlGpu.h.

57 enum SyncQueue {
58     kForce_SyncQueue,
59     kSkip_SyncQueue
60 };

Constructor & Destructor Documentation

◆ ~GrMtlGpu()

GrMtlGpu::~GrMtlGpu ( )
override

Definition at line 107 of file GrMtlGpu.mm.

107 {
108 if (!fDisconnected) {
109 this->destroyResources();
110 }
111}

Member Function Documentation

◆ checkFinishProcs()

void GrMtlGpu::checkFinishProcs ( )
inline override virtual

Implements GrGpu.

Definition at line 100 of file GrMtlGpu.h.

100 { this->checkForFinishedCommandBuffers(); }

◆ commandBuffer()

GrMtlCommandBuffer * GrMtlGpu::commandBuffer ( )

Definition at line 187 of file GrMtlGpu.mm.

187 {
188 if (!fCurrentCmdBuffer) {
189#if GR_METAL_CAPTURE_COMMANDBUFFER
190 this->testingOnly_startCapture();
191#endif
192 // Create a new command buffer for the next submit
193 fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
194 }
195
196 SkASSERT(fCurrentCmdBuffer);
197 return fCurrentCmdBuffer.get();
198}

◆ compile()

bool GrMtlGpu::compile ( const GrProgramDesc &,
const GrProgramInfo & 
)
override virtual

In this case we have a program descriptor and a program info but no render target.

Implements GrGpu.

Definition at line 1134 of file GrMtlGpu.mm.

1134 {
1135
1136    GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
1137
1138    auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
1139            desc, programInfo, &stat);
1140    if (!pipelineState) {
1141        return false;
1142    }
1143
1144    return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
1145}

◆ copySurfaceAsBlit()

void GrMtlGpu::copySurfaceAsBlit ( GrSurface * dst,
GrSurface * src,
GrMtlAttachment * dstAttachment,
GrMtlAttachment * srcAttachment,
const SkIRect & srcRect,
const SkIPoint & dstPoint 
)

Definition at line 1230 of file GrMtlGpu.mm.

1232 {
1233#ifdef SK_DEBUG
1234 SkASSERT(this->mtlCaps().canCopyAsBlit(dstAttachment->mtlFormat(), dstAttachment->numSamples(),
1235 srcAttachment->mtlFormat(), dstAttachment->numSamples(),
1236 srcRect, dstPoint, dst == src));
1237#endif
1238 id<MTLTexture> GR_NORETAIN dstTex = dstAttachment->mtlTexture();
1239 id<MTLTexture> GR_NORETAIN srcTex = srcAttachment->mtlTexture();
1240
1241 auto cmdBuffer = this->commandBuffer();
1242 id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
1243 if (!blitCmdEncoder) {
1244 return;
1245 }
1246#ifdef SK_ENABLE_MTL_DEBUG_INFO
1247 [blitCmdEncoder pushDebugGroup:@"copySurfaceAsBlit"];
1248#endif
1249 [blitCmdEncoder copyFromTexture: srcTex
1250 sourceSlice: 0
1251 sourceLevel: 0
1252 sourceOrigin: MTLOriginMake(srcRect.x(), srcRect.y(), 0)
1253 sourceSize: MTLSizeMake(srcRect.width(), srcRect.height(), 1)
1254 toTexture: dstTex
1255 destinationSlice: 0
1256 destinationLevel: 0
1257 destinationOrigin: MTLOriginMake(dstPoint.fX, dstPoint.fY, 0)];
1258#ifdef SK_ENABLE_MTL_DEBUG_INFO
1259 [blitCmdEncoder popDebugGroup];
1260#endif
1261 cmdBuffer->addGrSurface(sk_ref_sp<const GrSurface>(dst));
1262 cmdBuffer->addGrSurface(sk_ref_sp<const GrSurface>(src));
1263}

◆ copySurfaceAsResolve()

void GrMtlGpu::copySurfaceAsResolve ( GrSurface * dst,
GrSurface * src 
)

Definition at line 1214 of file GrMtlGpu.mm.

1214 {
1215 // TODO: Add support for subrectangles
1216 GrMtlRenderTarget* srcRT = static_cast<GrMtlRenderTarget*>(src->asRenderTarget());
1217 GrRenderTarget* dstRT = dst->asRenderTarget();
1218 GrMtlAttachment* dstAttachment;
1219 if (dstRT) {
1220 GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
1221 dstAttachment = mtlRT->colorAttachment();
1222 } else {
1223 SkASSERT(dst->asTexture());
1224 dstAttachment = static_cast<GrMtlTexture*>(dst->asTexture())->attachment();
1225 }
1226
1227 this->resolve(dstAttachment, srcRT->colorAttachment());
1228}

◆ deleteBackendTexture()

void GrMtlGpu::deleteBackendTexture ( const GrBackendTexture & )
override virtual

Frees a texture created by createBackendTexture(). If ownership of the backend texture has been transferred to a context using adopt semantics this should not be called.

Implements GrGpu.

Definition at line 1129 of file GrMtlGpu.mm.

1129 {
1130 SkASSERT(GrBackendApi::kMetal == tex.backend());
1131 // Nothing to do here, will get cleaned up when the GrBackendTexture object goes away
1132}
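
A minimal sketch of the lifecycle this override participates in, assuming a gpu pointer to a live GrMtlGpu and placeholder dimensions; the backend format is pulled from the caps so the sketch stays backend-agnostic, and real clients normally reach these entry points through GrDirectContext rather than GrGpu directly.

    GrBackendFormat format = gpu->caps()->getDefaultBackendFormat(GrColorType::kRGBA_8888,
                                                                  GrRenderable::kNo);
    GrBackendTexture tex = gpu->createBackendTexture({256, 256}, format, GrRenderable::kNo,
                                                     skgpu::Mipmapped::kNo, GrProtected::kNo,
                                                     /*label=*/"example");
    // ... share tex with another context; if that context adopted ownership, skip the delete ...
    gpu->deleteBackendTexture(tex);   // on Metal this is a no-op: the MTLTexture is released
                                      // when the GrBackendTexture itself goes away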

◆ device()

id< MTLDevice > GrMtlGpu::device ( ) const
inline

Definition at line 49 of file GrMtlGpu.h.

49 { return fDevice; }

◆ disconnect()

void GrMtlGpu::disconnect ( DisconnectType  type)
override virtual

Reimplemented from GrGpu.

Definition at line 113 of file GrMtlGpu.mm.

113 {
114    INHERITED::disconnect(type);
115
116    if (!fDisconnected) {
117        this->destroyResources();
118        fDisconnected = true;
119    }
120}

◆ finishOutstandingGpuWork()

void GrMtlGpu::finishOutstandingGpuWork ( )
override virtual

Implements GrGpu.

Definition at line 262 of file GrMtlGpu.mm.

262 {
263 // wait for the last command buffer we've submitted to finish
264 OutstandingCommandBuffer* back =
265 (OutstandingCommandBuffer*)fOutstandingCommandBuffers.back();
266 if (back) {
267 (*back)->waitUntilCompleted();
268 }
269}

◆ insertSemaphore()

void GrMtlGpu::insertSemaphore ( GrSemaphore * semaphore)
override virtual

Implements GrGpu.

Definition at line 1611 of file GrMtlGpu.mm.

1611 {
1612 if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
1613 SkASSERT(semaphore);
1614 GrMtlSemaphore* mtlSem = static_cast<GrMtlSemaphore*>(semaphore);
1615
1616 this->commandBuffer()->encodeSignalEvent(mtlSem->event(), mtlSem->value());
1617 }
1618}

◆ loadMSAAFromResolve()

GrMtlRenderCommandEncoder * GrMtlGpu::loadMSAAFromResolve ( GrAttachment * dst,
GrMtlAttachment * src,
const SkIRect & srcRect,
MTLRenderPassStencilAttachmentDescriptor *  stencil 
)

Definition at line 1660 of file GrMtlGpu.mm.

1662 {
1663 if (!dst) {
1664 return nil;
1665 }
1666 if (!src || src->framebufferOnly()) {
1667 return nil;
1668 }
1669
1670 GrMtlAttachment* mtlDst = static_cast<GrMtlAttachment*>(dst);
1671
1672 MTLPixelFormat stencilFormat = stencil.texture.pixelFormat;
1673 auto renderPipeline = this->resourceProvider().findOrCreateMSAALoadPipeline(mtlDst->mtlFormat(),
1674 dst->numSamples(),
1675 stencilFormat);
1676
1677 // Set up rendercommandencoder
1678 auto renderPassDesc = [MTLRenderPassDescriptor new];
1679 auto colorAttachment = renderPassDesc.colorAttachments[0];
1680 colorAttachment.texture = mtlDst->mtlTexture();
1681 colorAttachment.loadAction = MTLLoadActionDontCare;
1682 colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
1683 colorAttachment.resolveTexture = src->mtlTexture();
1684
1685 renderPassDesc.stencilAttachment = stencil;
1686
1687 // We know in this case that the preceding renderCommandEncoder will not be compatible.
1688 // Either it's using a different rendertarget, or we are reading from the resolve and
1689 // hence we need to let the previous resolve finish. So we create a new one without checking.
1690 auto renderCmdEncoder =
1691 this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr);
1692 if (!renderCmdEncoder) {
1693 return nullptr;
1694 }
1695
1696 // Bind pipeline
1697 renderCmdEncoder->setRenderPipelineState(renderPipeline->mtlPipelineState());
1698 this->commandBuffer()->addResource(sk_ref_sp(renderPipeline));
1699
1700 // Bind src as input texture
1701 renderCmdEncoder->setFragmentTexture(src->mtlTexture(), 0);
1702 // No sampler needed
1703 this->commandBuffer()->addGrSurface(sk_ref_sp<GrSurface>(src));
1704
1705 // Scissor and viewport should default to size of color attachment
1706
1707 // Update and bind uniform data
1708 int w = srcRect.width();
1709 int h = srcRect.height();
1710
1711 // dst rect edges in NDC (-1 to 1)
1712 int dw = dst->width();
1713 int dh = dst->height();
1714 float dx0 = 2.f * srcRect.fLeft / dw - 1.f;
1715 float dx1 = 2.f * (srcRect.fLeft + w) / dw - 1.f;
1716 float dy0 = 2.f * srcRect.fTop / dh - 1.f;
1717 float dy1 = 2.f * (srcRect.fTop + h) / dh - 1.f;
1718
1719 struct {
1720 float posXform[4];
1721 int textureSize[2];
1722 int pad[2];
1723 } uniData = {{dx1 - dx0, dy1 - dy0, dx0, dy0}, {dw, dh}, {0, 0}};
1724
1725 constexpr size_t uniformSize = 32;
1726 if (@available(macOS 10.11, iOS 8.3, tvOS 9.0, *)) {
1727 SkASSERT(uniformSize <= this->caps()->maxPushConstantsSize());
1728 renderCmdEncoder->setVertexBytes(&uniData, uniformSize, 0);
1729 } else {
1730 // upload the data
1731        GrRingBuffer::Slice slice = this->uniformsRingBuffer()->suballocate(uniformSize);
1732        GrMtlBuffer* buffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
1733        char* destPtr = static_cast<char*>(slice.fBuffer->map()) + slice.fOffset;
1734 memcpy(destPtr, &uniData, uniformSize);
1735
1736 renderCmdEncoder->setVertexBuffer(buffer->mtlBuffer(), slice.fOffset, 0);
1737 }
1738
1739 renderCmdEncoder->drawPrimitives(MTLPrimitiveTypeTriangleStrip, (NSUInteger)0, (NSUInteger)4);
1740
1741 return renderCmdEncoder;
1742}
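
To make the NDC mapping above concrete (illustrative numbers only): with dst dimensions dw = 800, dh = 600 and a srcRect whose fLeft = 200, fTop = 150, w = 400 and h = 300, the edges come out to dx0 = 2*200/800 - 1 = -0.5, dx1 = 2*600/800 - 1 = 0.5, dy0 = 2*150/600 - 1 = -0.5 and dy1 = 2*450/600 - 1 = 0.5, so posXform packs {dx1 - dx0, dy1 - dy0, dx0, dy0} = {1.0, 1.0, -0.5, -0.5}, i.e. the scale and offset the MSAA-load draw uses to position its quad over that sub-rectangle of the destination.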

◆ Make()

GR_NORETAIN_BEGIN std::unique_ptr< GrGpu > GrMtlGpu::Make ( const GrMtlBackendContext & context,
const GrContextOptions & options,
GrDirectContext * direct 
)
static

Definition at line 56 of file GrMtlGpu.mm.

58 {
59 if (!context.fDevice || !context.fQueue) {
60 return nullptr;
61 }
62 if (@available(macOS 10.14, iOS 10.0, tvOS 10.0, *)) {
63 // no warning needed
64 } else {
65 SkDebugf("*** Error ***: Skia's Metal backend no longer supports this OS version.\n");
66#ifdef SK_BUILD_FOR_IOS
67 SkDebugf("Minimum supported version is iOS 10.0.\n");
68#else
69 SkDebugf("Minimum supported version is MacOS 10.14.\n");
70#endif
71 return nullptr;
72 }
73
74 id<MTLDevice> GR_NORETAIN device = (__bridge id<MTLDevice>)(context.fDevice.get());
75 id<MTLCommandQueue> GR_NORETAIN queue = (__bridge id<MTLCommandQueue>)(context.fQueue.get());
76
77 return std::unique_ptr<GrGpu>(new GrMtlGpu(direct,
78 options,
79 device,
80 queue));
81}
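
Since Make() only checks that a device and queue are present, the caller has to fill out the GrMtlBackendContext first. Below is a minimal sketch of that wiring, assuming an already-created MTLDevice and MTLCommandQueue; the wrapper function name and include paths are illustrative, and production code normally reaches this through the public GrDirectContext Metal factory rather than calling GrMtlGpu::Make directly.

    #import <Metal/Metal.h>

    #include <memory>
    #include "include/gpu/GrContextOptions.h"
    #include "include/gpu/ganesh/mtl/GrMtlBackendContext.h"   // assumed path
    #include "src/gpu/ganesh/mtl/GrMtlGpu.h"                   // assumed path (private header)

    std::unique_ptr<GrGpu> MakeMtlGpuSketch(id<MTLDevice> device,
                                            id<MTLCommandQueue> queue,
                                            GrDirectContext* direct) {
        GrMtlBackendContext backendContext = {};
        // sk_cfp retains the handles; Make() later bridges them back to the Objective-C types.
        backendContext.fDevice.retain((__bridge GrMTLHandle)device);
        backendContext.fQueue.retain((__bridge GrMTLHandle)queue);

        GrContextOptions options;  // defaults are fine for a sketch
        return GrMtlGpu::Make(backendContext, options, direct);  // nullptr if device or queue is missing
    }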

◆ makeSemaphore()

std::unique_ptr< GrSemaphore > GrMtlGpu::makeSemaphore ( bool  isOwned)
override virtual

Implements GrGpu.

Definition at line 1598 of file GrMtlGpu.mm.

1598 {
1599 SkASSERT(this->caps()->semaphoreSupport());
1600 return GrMtlSemaphore::Make(this);
1601}

◆ mtlCaps()

const GrMtlCaps & GrMtlGpu::mtlCaps ( ) const
inline

Definition at line 47 of file GrMtlGpu.h.

47 { return *fMtlCaps; }

◆ onCopySurface()

bool GrMtlGpu::onCopySurface ( GrSurface * dst,
const SkIRect & dstRect,
GrSurface * src,
const SkIRect & srcRect,
GrSamplerState::Filter  
)
override virtual

Implements GrGpu.

Definition at line 1265 of file GrMtlGpu.mm.

1267 {
1268 SkASSERT(!src->isProtected() && !dst->isProtected());
1269
1270 if (srcRect.size() != dstRect.size()) {
1271 return false;
1272 }
1273
1274 GrMtlAttachment* dstAttachment;
1275 GrMtlAttachment* srcAttachment;
1276 GrRenderTarget* dstRT = dst->asRenderTarget();
1277 if (dstRT) {
1278 GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
1279 // This will technically return true for single sample rts that used DMSAA in which case we
1280 // don't have to pick the resolve attachment. But in that case the resolve and color
1281 // attachments will be the same anyways.
1282 if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
1283 dstAttachment = mtlRT->resolveAttachment();
1284 } else {
1285 dstAttachment = mtlRT->colorAttachment();
1286 }
1287 } else if (dst->asTexture()) {
1288 dstAttachment = static_cast<GrMtlTexture*>(dst->asTexture())->attachment();
1289 } else {
1290 // The surface in a GrAttachment already
1291 dstAttachment = static_cast<GrMtlAttachment*>(dst);
1292 }
1293 GrRenderTarget* srcRT = src->asRenderTarget();
1294 if (srcRT) {
1295 GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(srcRT);
1296 // This will technically return true for single sample rts that used DMSAA in which case we
1297 // don't have to pick the resolve attachment. But in that case the resolve and color
1298 // attachments will be the same anyways.
1299 if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
1300 srcAttachment = mtlRT->resolveAttachment();
1301 } else {
1302 srcAttachment = mtlRT->colorAttachment();
1303 }
1304 } else if (src->asTexture()) {
1305 SkASSERT(src->asTexture());
1306 srcAttachment = static_cast<GrMtlTexture*>(src->asTexture())->attachment();
1307 } else {
1308 // The surface in a GrAttachment already
1309 srcAttachment = static_cast<GrMtlAttachment*>(src);
1310 }
1311
1312 MTLPixelFormat dstFormat = dstAttachment->mtlFormat();
1313 MTLPixelFormat srcFormat = srcAttachment->mtlFormat();
1314
1315 int dstSampleCnt = dstAttachment->sampleCount();
1316 int srcSampleCnt = srcAttachment->sampleCount();
1317
1318 const SkIPoint dstPoint = dstRect.topLeft();
1319 if (this->mtlCaps().canCopyAsResolve(dstFormat, dstSampleCnt,
1320 srcFormat, srcSampleCnt,
1321 SkToBool(srcRT), src->dimensions(),
1322 srcRect, dstPoint,
1323 dstAttachment == srcAttachment)) {
1324 this->copySurfaceAsResolve(dst, src);
1325 return true;
1326 }
1327
1328 if (srcAttachment->framebufferOnly() || dstAttachment->framebufferOnly()) {
1329 return false;
1330 }
1331
1332 if (this->mtlCaps().canCopyAsBlit(dstFormat, dstSampleCnt, srcFormat, srcSampleCnt,
1333 srcRect, dstPoint, dstAttachment == srcAttachment)) {
1334 this->copySurfaceAsBlit(dst, src, dstAttachment, srcAttachment, srcRect, dstPoint);
1335 return true;
1336 }
1337
1338 return false;
1339}
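
A minimal sketch of reaching this override through the inherited GrGpu::copySurface() entry point; the gpu, dstSurface and srcSurface names are placeholders. Note that srcRect and dstRect must be the same size, and the implementation tries a resolve copy before falling back to a blit.

    SkIRect srcRect = SkIRect::MakeWH(128, 128);
    SkIRect dstRect = SkIRect::MakeXYWH(32, 32, 128, 128);   // same size as srcRect
    bool ok = gpu->copySurface(dstSurface, dstRect, srcSurface, srcRect,
                               GrSamplerState::Filter::kNearest);
    if (!ok) {
        // Neither a resolve nor a blit was legal for these formats/sample counts;
        // the caller has to fall back to a draw.
    }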

◆ pipelineBuilder()

GrThreadSafePipelineBuilder * GrMtlGpu::pipelineBuilder ( )
override virtual

Implements GrGpu.

Definition at line 122 of file GrMtlGpu.mm.

122 {
123 return nullptr;
124}

◆ precompileShader()

bool GrMtlGpu::precompileShader ( const SkData & key,
const SkData & data 
)
override virtual

Reimplemented from GrGpu.

Definition at line 1147 of file GrMtlGpu.mm.

1147 {
1148 return this->resourceProvider().precompileShader(key, data);
1149}

◆ prepareTextureForCrossContextUsage()

std::unique_ptr< GrSemaphore > GrMtlGpu::prepareTextureForCrossContextUsage ( GrTexture * )
override virtual

Put this texture in a safe and known state for use across multiple contexts. Depending on the backend, this may return a GrSemaphore. If so, other contexts should wait on that semaphore before using this texture.

Implements GrGpu.

Definition at line 300 of file GrMtlGpu.mm.

300 {
301    this->submitToGpu(GrSyncCpu::kNo);
302    return nullptr;
303}
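
A minimal sketch of the cross-context contract, with gpu, otherGpu and texture as placeholders: when a backend returns a semaphore the consuming context must wait on it, while the Metal implementation above simply flushes the pending work and returns nullptr.

    std::unique_ptr<GrSemaphore> sem = gpu->prepareTextureForCrossContextUsage(texture);
    if (sem) {
        // Backends that return a semaphore require the consumer to wait before sampling.
        otherGpu->waitSemaphore(sem.get());
    }
    // texture is now safe to wrap and sample from the other context.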

◆ refPipelineBuilder()

sk_sp< GrThreadSafePipelineBuilder > GrMtlGpu::refPipelineBuilder ( )
override virtual

Implements GrGpu.

Definition at line 126 of file GrMtlGpu.mm.

126 {
127 return nullptr;
128}

◆ resourceProvider()

GrMtlResourceProvider & GrMtlGpu::resourceProvider ( )
inline

Definition at line 51 of file GrMtlGpu.h.

51 { return fResourceProvider; }

◆ stagingBufferManager()

GrStagingBufferManager * GrMtlGpu::stagingBufferManager ( )
inline override virtual

Reimplemented from GrGpu.

Definition at line 53 of file GrMtlGpu.h.

53 { return &fStagingBufferManager; }

◆ submit()

void GrMtlGpu::submit ( GrOpsRenderPass * renderPass)
override virtual

Implements GrGpu.

Definition at line 205 of file GrMtlGpu.mm.

205 {
206 GrMtlOpsRenderPass* mtlRenderPass = reinterpret_cast<GrMtlOpsRenderPass*>(renderPass);
207 mtlRenderPass->submit();
208 delete renderPass;
209}

◆ submitIndirectCommandBuffer()

void GrMtlGpu::submitIndirectCommandBuffer ( GrSurface * surface,
GrSurfaceOrigin  origin,
const SkIRect * bounds 
)
inline

Definition at line 113 of file GrMtlGpu.h.

114 {
115 this->didWriteToSurface(surface, origin, bounds);
116 }

◆ uniformsRingBuffer()

GrRingBuffer * GrMtlGpu::uniformsRingBuffer ( )
inline override virtual

Reimplemented from GrGpu.

Definition at line 118 of file GrMtlGpu.h.

118 { return &fUniformsRingBuffer; }

◆ waitSemaphore()

void GrMtlGpu::waitSemaphore ( GrSemaphore * semaphore)
override virtual

Implements GrGpu.

Definition at line 1620 of file GrMtlGpu.mm.

1620 {
1621 if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
1622 SkASSERT(semaphore);
1623 GrMtlSemaphore* mtlSem = static_cast<GrMtlSemaphore*>(semaphore);
1624
1625 this->commandBuffer()->encodeWaitForEvent(mtlSem->event(), mtlSem->value());
1626 }
1627}
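
waitSemaphore() is the consuming half of the pairing with insertSemaphore() documented above. A minimal sketch of that signal/wait ordering, assuming two GrMtlGpu instances sharing a device (gpuA producing, gpuB consuming), the inherited submitToGpu() helper and the GrSyncCpu::kNo flag; in practice GrDrawingManager drives this sequencing rather than client code.

    // Producer: signal after the work already recorded on gpuA's current command buffer.
    std::unique_ptr<GrSemaphore> sem = gpuA->makeSemaphore(/*isOwned=*/true);
    gpuA->insertSemaphore(sem.get());        // encodes an event signal (macOS 10.14+/iOS 12+)
    gpuA->submitToGpu(GrSyncCpu::kNo);

    // Consumer: work recorded on gpuB after this call waits for that signal on the GPU timeline.
    gpuB->waitSemaphore(sem.get());
    gpuB->submitToGpu(GrSyncCpu::kNo);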

◆ wrapBackendSemaphore()

std::unique_ptr< GrSemaphore > GrMtlGpu::wrapBackendSemaphore ( const GrBackendSemaphore & semaphore,
GrSemaphoreWrapType  ,
GrWrapOwnership   
)
override virtual

Implements GrGpu.

Definition at line 1603 of file GrMtlGpu.mm.

1605 {
1606    SkASSERT(this->caps()->backendSemaphoreSupport());
1607    return GrMtlSemaphore::MakeWrapped(GrBackendSemaphores::GetMtlHandle(semaphore),
1608                                       GrBackendSemaphores::GetMtlValue(semaphore));
1609}

The documentation for this class was generated from the following files:

GrMtlGpu.h
GrMtlGpu.mm