The Flutter Engine
Public Types | Public Member Functions | Static Public Member Functions | Private Member Functions | List of all members
GrMtlGpu Class Reference

#include <GrMtlGpu.h>

Inheritance diagram for GrMtlGpu:
GrGpu

Public Types

enum  SyncQueue { kForce_SyncQueue , kSkip_SyncQueue }
 
- Public Types inherited from GrGpu
enum class  DisconnectType { kAbandon , kCleanup }
 

Public Member Functions

 ~GrMtlGpu () override
 
void disconnect (DisconnectType) override
 
GrThreadSafePipelineBuilder * pipelineBuilder () override
 
sk_sp< GrThreadSafePipelineBuilder > refPipelineBuilder () override
 
const GrMtlCaps & mtlCaps () const
 
id< MTLDevice > device () const
 
GrMtlResourceProvider & resourceProvider ()
 
GrStagingBufferManager * stagingBufferManager () override
 
GrMtlCommandBuffer * commandBuffer ()
 
void deleteBackendTexture (const GrBackendTexture &) override
 
bool compile (const GrProgramDesc &, const GrProgramInfo &) override
 
bool precompileShader (const SkData &key, const SkData &data) override
 
void copySurfaceAsResolve (GrSurface *dst, GrSurface *src)
 
void copySurfaceAsBlit (GrSurface *dst, GrSurface *src, GrMtlAttachment *dstAttachment, GrMtlAttachment *srcAttachment, const SkIRect &srcRect, const SkIPoint &dstPoint)
 
bool onCopySurface (GrSurface *dst, const SkIRect &dstRect, GrSurface *src, const SkIRect &srcRect, GrSamplerState::Filter) override
 
void submit (GrOpsRenderPass *renderPass) override
 
std::unique_ptr< GrSemaphore > makeSemaphore (bool isOwned) override
 
std::unique_ptr< GrSemaphore > wrapBackendSemaphore (const GrBackendSemaphore &, GrSemaphoreWrapType, GrWrapOwnership) override
 
void insertSemaphore (GrSemaphore *semaphore) override
 
void waitSemaphore (GrSemaphore *semaphore) override
 
void checkFinishProcs () override
 
void finishOutstandingGpuWork () override
 
std::unique_ptr< GrSemaphore > prepareTextureForCrossContextUsage (GrTexture *) override
 
GrMtlRenderCommandEncoder * loadMSAAFromResolve (GrAttachment *dst, GrMtlAttachment *src, const SkIRect &srcRect, MTLRenderPassStencilAttachmentDescriptor *)
 
void submitIndirectCommandBuffer (GrSurface *surface, GrSurfaceOrigin origin, const SkIRect *bounds)
 
GrRingBuffer * uniformsRingBuffer () override
 
- Public Member Functions inherited from GrGpu
 GrGpu (GrDirectContext *direct)
 
virtual ~GrGpu ()
 
GrDirectContext * getContext ()
 
const GrDirectContext * getContext () const
 
const GrCaps * caps () const
 
sk_sp< const GrCaps > refCaps () const
 
virtual bool isDeviceLost () const
 
void markContextDirty (uint32_t state=kAll_GrBackendState)
 
sk_sp< GrTexture > createTexture (SkISize dimensions, const GrBackendFormat &format, GrTextureType textureType, GrRenderable renderable, int renderTargetSampleCnt, skgpu::Budgeted budgeted, GrProtected isProtected, GrColorType textureColorType, GrColorType srcColorType, const GrMipLevel texels[], int texelLevelCount, std::string_view label)
 
sk_sp< GrTexture > createTexture (SkISize dimensions, const GrBackendFormat &format, GrTextureType textureType, GrRenderable renderable, int renderTargetSampleCnt, skgpu::Mipmapped mipmapped, skgpu::Budgeted budgeted, GrProtected isProtected, std::string_view label)
 
sk_sp< GrTexture > createCompressedTexture (SkISize dimensions, const GrBackendFormat &format, skgpu::Budgeted budgeted, skgpu::Mipmapped mipmapped, GrProtected isProtected, const void *data, size_t dataSize)
 
sk_sp< GrTexture > wrapBackendTexture (const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable, GrIOType)
 
sk_sp< GrTexture > wrapCompressedBackendTexture (const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable)
 
sk_sp< GrTexture > wrapRenderableBackendTexture (const GrBackendTexture &, int sampleCnt, GrWrapOwnership, GrWrapCacheable)
 
sk_sp< GrRenderTarget > wrapBackendRenderTarget (const GrBackendRenderTarget &)
 
sk_sp< GrRenderTarget > wrapVulkanSecondaryCBAsRenderTarget (const SkImageInfo &, const GrVkDrawableInfo &)
 
sk_sp< GrGpuBuffer > createBuffer (size_t size, GrGpuBufferType intendedType, GrAccessPattern accessPattern)
 
void resolveRenderTarget (GrRenderTarget *, const SkIRect &resolveRect)
 
bool regenerateMipMapLevels (GrTexture *)
 
void resetTextureBindings ()
 
bool readPixels (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType dstColorType, void *buffer, size_t rowBytes)
 
bool writePixels (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount, bool prepForTexSampling=false)
 
bool writePixels (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType srcColorType, const void *buffer, size_t rowBytes, bool prepForTexSampling=false)
 
bool transferFromBufferToBuffer (sk_sp< GrGpuBuffer > src, size_t srcOffset, sk_sp< GrGpuBuffer > dst, size_t dstOffset, size_t size)
 
bool transferPixelsTo (GrTexture *texture, SkIRect rect, GrColorType textureColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer > transferBuffer, size_t offset, size_t rowBytes)
 
bool transferPixelsFrom (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer > transferBuffer, size_t offset)
 
bool copySurface (GrSurface *dst, const SkIRect &dstRect, GrSurface *src, const SkIRect &srcRect, GrSamplerState::Filter filter)
 
GrOpsRenderPass * getOpsRenderPass (GrRenderTarget *renderTarget, bool useMSAASurface, GrAttachment *stencil, GrSurfaceOrigin, const SkIRect &bounds, const GrOpsRenderPass::LoadAndStoreInfo &, const GrOpsRenderPass::StencilLoadAndStoreInfo &, const skia_private::TArray< GrSurfaceProxy *, true > &sampledProxies, GrXferBarrierFlags renderPassXferBarriers)
 
void executeFlushInfo (SkSpan< GrSurfaceProxy * >, SkSurfaces::BackendSurfaceAccess access, const GrFlushInfo &, const skgpu::MutableTextureState *newState)
 
virtual void willExecute ()
 
bool submitToGpu (GrSyncCpu sync)
 
bool checkAndResetOOMed ()
 
virtual void releaseUnlockedBackendObjects ()
 
Stats * stats ()
 
void dumpJSON (SkJSONWriter *) const
 
GrBackendTexture createBackendTexture (SkISize dimensions, const GrBackendFormat &, GrRenderable, skgpu::Mipmapped, GrProtected, std::string_view label)
 
bool clearBackendTexture (const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, std::array< float, 4 > color)
 
GrBackendTexture createCompressedBackendTexture (SkISize dimensions, const GrBackendFormat &, skgpu::Mipmapped, GrProtected)
 
bool updateCompressedBackendTexture (const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, const void *data, size_t length)
 
virtual bool setBackendTextureState (const GrBackendTexture &, const skgpu::MutableTextureState &, skgpu::MutableTextureState *previousState, sk_sp< skgpu::RefCntedCallback > finishedCallback)
 
virtual bool setBackendRenderTargetState (const GrBackendRenderTarget &, const skgpu::MutableTextureState &, skgpu::MutableTextureState *previousState, sk_sp< skgpu::RefCntedCallback > finishedCallback)
 
void handleDirtyContext ()
 
virtual void storeVkPipelineCacheData ()
 

Static Public Member Functions

static std::unique_ptr< GrGpu > Make (const GrMtlBackendContext &, const GrContextOptions &, GrDirectContext *)
 

Private Member Functions

void xferBarrier (GrRenderTarget *, GrXferBarrierType) override
 
void takeOwnershipOfBuffer (sk_sp< GrGpuBuffer >) override
 
GrBackendTexture onCreateBackendTexture (SkISize dimensions, const GrBackendFormat &, GrRenderable, skgpu::Mipmapped, GrProtected, std::string_view label) override
 
bool onClearBackendTexture (const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, std::array< float, 4 > color) override
 
GrBackendTexture onCreateCompressedBackendTexture (SkISize dimensions, const GrBackendFormat &, skgpu::Mipmapped, GrProtected) override
 
bool onUpdateCompressedBackendTexture (const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, const void *data, size_t size) override
 
sk_sp< GrTexture > onCreateTexture (SkISize, const GrBackendFormat &, GrRenderable, int renderTargetSampleCnt, skgpu::Budgeted, GrProtected, int mipLevelCount, uint32_t levelClearMask, std::string_view label) override
 
sk_sp< GrTexture > onCreateCompressedTexture (SkISize dimensions, const GrBackendFormat &, skgpu::Budgeted, skgpu::Mipmapped, GrProtected, const void *data, size_t dataSize) override
 
sk_sp< GrTexture > onWrapBackendTexture (const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable, GrIOType) override
 
sk_sp< GrTexture > onWrapCompressedBackendTexture (const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable) override
 
sk_sp< GrTexture > onWrapRenderableBackendTexture (const GrBackendTexture &, int sampleCnt, GrWrapOwnership, GrWrapCacheable) override
 
sk_sp< GrRenderTarget > onWrapBackendRenderTarget (const GrBackendRenderTarget &) override
 
sk_sp< GrGpuBuffer > onCreateBuffer (size_t, GrGpuBufferType, GrAccessPattern) override
 
bool onReadPixels (GrSurface *surface, SkIRect, GrColorType surfaceColorType, GrColorType bufferColorType, void *, size_t rowBytes) override
 
bool onTransferFromBufferToBuffer (sk_sp< GrGpuBuffer > src, size_t srcOffset, sk_sp< GrGpuBuffer > dst, size_t dstOffset, size_t size) override
 
bool onWritePixels (GrSurface *, SkIRect, GrColorType surfaceColorType, GrColorType bufferColorType, const GrMipLevel[], int mipLevelCount, bool prepForTexSampling) override
 
bool onTransferPixelsTo (GrTexture *, SkIRect, GrColorType textureColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer >, size_t offset, size_t rowBytes) override
 
bool onTransferPixelsFrom (GrSurface *, SkIRect, GrColorType surfaceColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer >, size_t offset) override
 
bool onRegenerateMipMapLevels (GrTexture *) override
 
void onResolveRenderTarget (GrRenderTarget *target, const SkIRect &resolveRect) override
 
void addFinishedProc (GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext) override
 
GrOpsRenderPass * onGetOpsRenderPass (GrRenderTarget *, bool useMSAASurface, GrAttachment *, GrSurfaceOrigin, const SkIRect &, const GrOpsRenderPass::LoadAndStoreInfo &, const GrOpsRenderPass::StencilLoadAndStoreInfo &, const skia_private::TArray< GrSurfaceProxy *, true > &sampledProxies, GrXferBarrierFlags renderPassXferBarriers) override
 
bool onSubmitToGpu (GrSyncCpu sync) override
 
sk_sp< GrAttachment > makeStencilAttachment (const GrBackendFormat &, SkISize dimensions, int numStencilSamples) override
 
GrBackendFormat getPreferredStencilFormat (const GrBackendFormat &) override
 
sk_sp< GrAttachment > makeMSAAAttachment (SkISize dimensions, const GrBackendFormat &format, int numSamples, GrProtected isProtected, GrMemoryless isMemoryless) override
 

Additional Inherited Members

- Protected Member Functions inherited from GrGpu
void didWriteToSurface (GrSurface *surface, GrSurfaceOrigin origin, const SkIRect *bounds, uint32_t mipLevels=1) const
 
void setOOMed ()
 
void initCaps (sk_sp< const GrCaps > caps)
 
- Static Protected Member Functions inherited from GrGpu
static bool CompressedDataIsCorrect (SkISize dimensions, SkTextureCompressionType, skgpu::Mipmapped, const void *data, size_t length)
 
- Protected Attributes inherited from GrGpu
Stats fStats
 

Detailed Description

Definition at line 35 of file GrMtlGpu.h.

Member Enumeration Documentation

◆ SyncQueue

Enumerator
kForce_SyncQueue 
kSkip_SyncQueue 

Definition at line 57 of file GrMtlGpu.h.

57    enum SyncQueue {
58        kForce_SyncQueue,
59        kSkip_SyncQueue
60    };
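
kForce_SyncQueue makes command-buffer submission block the CPU until the queued work completes; kSkip_SyncQueue only enqueues it. A minimal illustrative sketch (not part of the header) of how the choice is made, mirroring onSubmitToGpu() documented below:

// Illustrative only: map the public GrSyncCpu request onto SyncQueue, following
// the logic of GrMtlGpu::onSubmitToGpu() shown later on this page.
GrMtlGpu::SyncQueue toSyncQueue(GrSyncCpu sync) {
    return sync == GrSyncCpu::kYes ? GrMtlGpu::kForce_SyncQueue   // wait for the MTLCommandBuffer to finish
                                   : GrMtlGpu::kSkip_SyncQueue;   // enqueue only and return immediately
}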

Constructor & Destructor Documentation

◆ ~GrMtlGpu()

GrMtlGpu::~GrMtlGpu ( )
override

Definition at line 107 of file GrMtlGpu.mm.

107 {
108 if (!fDisconnected) {
109 this->destroyResources();
110 }
111}

Member Function Documentation

◆ addFinishedProc()

void GrMtlGpu::addFinishedProc ( GrGpuFinishedProc  finishedProc,
GrGpuFinishedContext  finishedContext 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 271 of file GrMtlGpu.mm.

272 {
273 SkASSERT(finishedProc);
274 this->addFinishedCallback(skgpu::RefCntedCallback::Make(finishedProc, finishedContext));
275}
#define SkASSERT(cond)
Definition SkAssert.h:116
static sk_sp< RefCntedCallback > Make(Callback proc, Context ctx)
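
A hedged sketch of the callback shape this expects, assuming the usual Skia typedefs (GrGpuFinishedContext is an opaque pointer and GrGpuFinishedProc receives only that context). Client code typically supplies the pair through GrFlushInfo rather than calling this private member directly.

// Hypothetical finished proc: flips a caller-owned flag once the GPU work
// associated with the callback has completed.
static void onGpuWorkFinished(GrGpuFinishedContext finishedContext) {
    bool* done = static_cast<bool*>(finishedContext);   // caller-owned flag, purely illustrative
    *done = true;
}
// As the body above shows, the proc/context pair is wrapped in a skgpu::RefCntedCallback.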

◆ checkFinishProcs()

void GrMtlGpu::checkFinishProcs ( )
inlineoverridevirtual

Implements GrGpu.

Definition at line 100 of file GrMtlGpu.h.

100{ this->checkForFinishedCommandBuffers(); }

◆ commandBuffer()

GrMtlCommandBuffer * GrMtlGpu::commandBuffer ( )

Definition at line 187 of file GrMtlGpu.mm.

187 {
188 if (!fCurrentCmdBuffer) {
189#if GR_METAL_CAPTURE_COMMANDBUFFER
190 this->testingOnly_startCapture();
191#endif
192 // Create a new command buffer for the next submit
193 fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
194 }
195
196 SkASSERT(fCurrentCmdBuffer);
197 return fCurrentCmdBuffer.get();
198}
static sk_sp< GrMtlCommandBuffer > Make(id< MTLCommandQueue > queue)
T * get() const
Definition SkRefCnt.h:303
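
A minimal sketch of how other members on this page use the lazily created command buffer; getBlitCommandEncoder() is the same call that appears in the copy and transfer implementations below.

// Sketch: fetch (or lazily create) the current command buffer and open a blit
// encoder on it. The buffer is only submitted later, e.g. via onSubmitToGpu().
void encodeSomeBlitWork(GrMtlGpu* gpu) {
    GrMtlCommandBuffer* cmdBuffer = gpu->commandBuffer();
    id<MTLBlitCommandEncoder> blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return;   // encoder creation can fail; every caller on this page checks for nil
    }
    // ... encode copies here ...
}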

◆ compile()

bool GrMtlGpu::compile ( const GrProgramDesc &,
const GrProgramInfo &
)
overridevirtual

In this case we have a program descriptor and a program info but no render target.

Implements GrGpu.

Definition at line 1134 of file GrMtlGpu.mm.

1134 {
1135
1137
1138 auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
1139 desc, programInfo, &stat);
1140 if (!pipelineState) {
1141 return false;
1142 }
1143
1145}
GrMtlResourceProvider & resourceProvider()
Definition GrMtlGpu.h:51
GrMtlPipelineState * findOrCreateCompatiblePipelineState(const GrProgramDesc &, const GrProgramInfo &, GrThreadSafePipelineBuilder::Stats::ProgramCacheResult *stat=nullptr)
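
A hedged sketch of using compile() as a cache warm-up, assuming a GrProgramDesc/GrProgramInfo pair has already been built by higher-level code:

// Illustrative pre-warm: builds (or finds) a compatible Metal pipeline state so the
// first real draw with this program does not pay the compilation cost.
bool warmPipelineCache(GrMtlGpu* gpu, const GrProgramDesc& desc, const GrProgramInfo& info) {
    return gpu->compile(desc, info);   // false if no compatible pipeline state could be created
}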

◆ copySurfaceAsBlit()

void GrMtlGpu::copySurfaceAsBlit ( GrSurface *  dst,
GrSurface *  src,
GrMtlAttachment *  dstAttachment,
GrMtlAttachment *  srcAttachment,
const SkIRect &  srcRect,
const SkIPoint &  dstPoint 
)

Definition at line 1230 of file GrMtlGpu.mm.

1232 {
1233#ifdef SK_DEBUG
1234 SkASSERT(this->mtlCaps().canCopyAsBlit(dstAttachment->mtlFormat(), dstAttachment->numSamples(),
1235 srcAttachment->mtlFormat(), dstAttachment->numSamples(),
1236 srcRect, dstPoint, dst == src));
1237#endif
1238 id<MTLTexture> GR_NORETAIN dstTex = dstAttachment->mtlTexture();
1239 id<MTLTexture> GR_NORETAIN srcTex = srcAttachment->mtlTexture();
1240
1241 auto cmdBuffer = this->commandBuffer();
1242 id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
1243 if (!blitCmdEncoder) {
1244 return;
1245 }
1246#ifdef SK_ENABLE_MTL_DEBUG_INFO
1247 [blitCmdEncoder pushDebugGroup:@"copySurfaceAsBlit"];
1248#endif
1249 [blitCmdEncoder copyFromTexture: srcTex
1250 sourceSlice: 0
1251 sourceLevel: 0
1252 sourceOrigin: MTLOriginMake(srcRect.x(), srcRect.y(), 0)
1253 sourceSize: MTLSizeMake(srcRect.width(), srcRect.height(), 1)
1254 toTexture: dstTex
1255 destinationSlice: 0
1256 destinationLevel: 0
1257 destinationOrigin: MTLOriginMake(dstPoint.fX, dstPoint.fY, 0)];
1258#ifdef SK_ENABLE_MTL_DEBUG_INFO
1259 [blitCmdEncoder popDebugGroup];
1260#endif
1261 cmdBuffer->addGrSurface(sk_ref_sp<const GrSurface>(dst));
1262 cmdBuffer->addGrSurface(sk_ref_sp<const GrSurface>(src));
1263}
int numSamples() const
MTLPixelFormat mtlFormat() const
id< MTLTexture > mtlTexture() const
GrMtlCommandBuffer * commandBuffer()
Definition GrMtlGpu.mm:187
const GrMtlCaps & mtlCaps() const
Definition GrMtlGpu.h:47
int32_t height
int32_t width
constexpr int32_t x() const
Definition SkRect.h:141
constexpr int32_t y() const
Definition SkRect.h:148

◆ copySurfaceAsResolve()

void GrMtlGpu::copySurfaceAsResolve ( GrSurface *  dst,
GrSurface *  src 
)

Definition at line 1214 of file GrMtlGpu.mm.

1214 {
1215 // TODO: Add support for subrectangles
1216 GrMtlRenderTarget* srcRT = static_cast<GrMtlRenderTarget*>(src->asRenderTarget());
1217 GrRenderTarget* dstRT = dst->asRenderTarget();
1218 GrMtlAttachment* dstAttachment;
1219 if (dstRT) {
1220 GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
1221 dstAttachment = mtlRT->colorAttachment();
1222 } else {
1223 SkASSERT(dst->asTexture());
1224 dstAttachment = static_cast<GrMtlTexture*>(dst->asTexture())->attachment();
1225 }
1226
1227 this->resolve(dstAttachment, srcRT->colorAttachment());
1228}
GrMtlAttachment * colorAttachment() const
dst
Definition cp.py:12

◆ deleteBackendTexture()

void GrMtlGpu::deleteBackendTexture ( const GrBackendTexture & )
overridevirtual

Frees a texture created by createBackendTexture(). If ownership of the backend texture has been transferred to a context using adopt semantics, this should not be called.

Implements GrGpu.

Definition at line 1129 of file GrMtlGpu.mm.

1129 {
1130 SkASSERT(GrBackendApi::kMetal == tex.backend());
1131 // Nothing to do here, will get cleaned up when the GrBackendTexture object goes away
1132}

◆ device()

id< MTLDevice > GrMtlGpu::device ( ) const
inline

Definition at line 49 of file GrMtlGpu.h.

49{ return fDevice; }

◆ disconnect()

void GrMtlGpu::disconnect ( DisconnectType  type)
overridevirtual

Reimplemented from GrGpu.

Definition at line 113 of file GrMtlGpu.mm.

113 {
115
116 if (!fDisconnected) {
117 this->destroyResources();
118 fDisconnected = true;
119 }
120}
virtual void disconnect(DisconnectType)
Definition GrGpu.cpp:51

◆ finishOutstandingGpuWork()

void GrMtlGpu::finishOutstandingGpuWork ( )
overridevirtual

Implements GrGpu.

Definition at line 262 of file GrMtlGpu.mm.

262 {
263 // wait for the last command buffer we've submitted to finish
264 OutstandingCommandBuffer* back =
265 (OutstandingCommandBuffer*)fOutstandingCommandBuffers.back();
266 if (back) {
267 (*back)->waitUntilCompleted();
268 }
269}
const void * back() const
Definition SkDeque.h:43

◆ getPreferredStencilFormat()

GrBackendFormat GrMtlGpu::getPreferredStencilFormat ( const GrBackendFormat & )
inlineoverrideprivatevirtual

Implements GrGpu.

Definition at line 272 of file GrMtlGpu.h.

272 {
273 return GrBackendFormats::MakeMtl(this->mtlCaps().preferredStencilFormat());
274 }
SK_API GrBackendFormat MakeMtl(GrMTLPixelFormat format)

◆ insertSemaphore()

void GrMtlGpu::insertSemaphore ( GrSemaphore *  semaphore)
overridevirtual

Implements GrGpu.

Definition at line 1611 of file GrMtlGpu.mm.

1611 {
1612 if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
1613 SkASSERT(semaphore);
1614 GrMtlSemaphore* mtlSem = static_cast<GrMtlSemaphore*>(semaphore);
1615
1616 this->commandBuffer()->encodeSignalEvent(mtlSem->event(), mtlSem->value());
1617 }
1618}
void encodeSignalEvent(sk_sp< GrMtlEvent >, uint64_t value)
sk_sp< GrMtlEvent > event()
uint64_t value() const
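
A hedged sketch of how this pairs with makeSemaphore() and waitSemaphore() (both listed above); the signal and wait are encoded as events on command buffers, which is why the body is guarded by the macOS 10.14 / iOS 12.0 availability check. Here "gpu" stands for a GrMtlGpu*:

// Illustrative only: signal after the currently recorded work, wait before work
// recorded in a later submission that must observe the signal.
std::unique_ptr<GrSemaphore> semaphore = gpu->makeSemaphore(/*isOwned=*/true);
gpu->insertSemaphore(semaphore.get());   // encode the signal on the current command buffer
// ... submit, then before later dependent work ...
gpu->waitSemaphore(semaphore.get());     // encode the wait ahead of the new commands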

◆ loadMSAAFromResolve()

GrMtlRenderCommandEncoder * GrMtlGpu::loadMSAAFromResolve ( GrAttachment *  dst,
GrMtlAttachment *  src,
const SkIRect &  srcRect,
MTLRenderPassStencilAttachmentDescriptor *  stencil 
)

Definition at line 1660 of file GrMtlGpu.mm.

1662 {
1663 if (!dst) {
1664 return nil;
1665 }
1666 if (!src || src->framebufferOnly()) {
1667 return nil;
1668 }
1669
1670 GrMtlAttachment* mtlDst = static_cast<GrMtlAttachment*>(dst);
1671
1672 MTLPixelFormat stencilFormat = stencil.texture.pixelFormat;
1673 auto renderPipeline = this->resourceProvider().findOrCreateMSAALoadPipeline(mtlDst->mtlFormat(),
1674 dst->numSamples(),
1675 stencilFormat);
1676
1677 // Set up rendercommandencoder
1678 auto renderPassDesc = [MTLRenderPassDescriptor new];
1679 auto colorAttachment = renderPassDesc.colorAttachments[0];
1680 colorAttachment.texture = mtlDst->mtlTexture();
1681 colorAttachment.loadAction = MTLLoadActionDontCare;
1682 colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
1683 colorAttachment.resolveTexture = src->mtlTexture();
1684
1685 renderPassDesc.stencilAttachment = stencil;
1686
1687 // We know in this case that the preceding renderCommandEncoder will not be compatible.
1688 // Either it's using a different rendertarget, or we are reading from the resolve and
1689 // hence we need to let the previous resolve finish. So we create a new one without checking.
1690 auto renderCmdEncoder =
1691 this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr);
1692 if (!renderCmdEncoder) {
1693 return nullptr;
1694 }
1695
1696 // Bind pipeline
1697 renderCmdEncoder->setRenderPipelineState(renderPipeline->mtlPipelineState());
1698 this->commandBuffer()->addResource(sk_ref_sp(renderPipeline));
1699
1700 // Bind src as input texture
1701 renderCmdEncoder->setFragmentTexture(src->mtlTexture(), 0);
1702 // No sampler needed
1703 this->commandBuffer()->addGrSurface(sk_ref_sp<GrSurface>(src));
1704
1705 // Scissor and viewport should default to size of color attachment
1706
1707 // Update and bind uniform data
1708 int w = srcRect.width();
1709 int h = srcRect.height();
1710
1711 // dst rect edges in NDC (-1 to 1)
1712 int dw = dst->width();
1713 int dh = dst->height();
1714 float dx0 = 2.f * srcRect.fLeft / dw - 1.f;
1715 float dx1 = 2.f * (srcRect.fLeft + w) / dw - 1.f;
1716 float dy0 = 2.f * srcRect.fTop / dh - 1.f;
1717 float dy1 = 2.f * (srcRect.fTop + h) / dh - 1.f;
1718
1719 struct {
1720 float posXform[4];
1721 int textureSize[2];
1722 int pad[2];
1723 } uniData = {{dx1 - dx0, dy1 - dy0, dx0, dy0}, {dw, dh}, {0, 0}};
1724
1725 constexpr size_t uniformSize = 32;
1726 if (@available(macOS 10.11, iOS 8.3, tvOS 9.0, *)) {
1727 SkASSERT(uniformSize <= this->caps()->maxPushConstantsSize());
1728 renderCmdEncoder->setVertexBytes(&uniData, uniformSize, 0);
1729 } else {
1730 // upload the data
1731 GrRingBuffer::Slice slice = this->uniformsRingBuffer()->suballocate(uniformSize);
1733 char* destPtr = static_cast<char*>(slice.fBuffer->map()) + slice.fOffset;
1734 memcpy(destPtr, &uniData, uniformSize);
1735
1736 renderCmdEncoder->setVertexBuffer(buffer->mtlBuffer(), slice.fOffset, 0);
1737 }
1738
1739 renderCmdEncoder->drawPrimitives(MTLPrimitiveTypeTriangleStrip, (NSUInteger)0, (NSUInteger)4);
1740
1741 return renderCmdEncoder;
1742}
sk_sp< T > sk_ref_sp(T *obj)
Definition SkRefCnt.h:381
void * map()
const GrCaps * caps() const
Definition GrGpu.h:73
GrMtlRenderCommandEncoder * getRenderCommandEncoder(MTLRenderPassDescriptor *, const GrMtlPipelineState *, GrMtlOpsRenderPass *opsRenderPass)
void addResource(const sk_sp< const GrManagedResource > &resource)
void addGrSurface(sk_sp< const GrSurface > surface)
GrRingBuffer * uniformsRingBuffer() override
Definition GrMtlGpu.h:118
void setRenderPipelineState(id< MTLRenderPipelineState > pso)
const GrMtlRenderPipeline * findOrCreateMSAALoadPipeline(MTLPixelFormat colorFormat, int sampleCount, MTLPixelFormat stencilFormat)
Slice suballocate(size_t size)
static const uint8_t buffer[]
SkScalar w
SkScalar h
GrGpuBuffer * fBuffer
constexpr int32_t height() const
Definition SkRect.h:165
int32_t fTop
smaller y-axis bounds
Definition SkRect.h:34
constexpr int32_t width() const
Definition SkRect.h:158
int32_t fLeft
smaller x-axis bounds
Definition SkRect.h:33
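
The posXform uniform above packs a scale and offset that map the destination rectangle into normalized device coordinates. A worked example with illustrative numbers:

// Example values: dst->width() == 256, srcRect.fLeft == 64, srcRect.width() == 128.
float dw  = 256.f;
float dx0 = 2.f * 64.f / dw - 1.f;            // -0.5f : left edge in NDC
float dx1 = 2.f * (64.f + 128.f) / dw - 1.f;  //  0.5f : right edge in NDC
// posXform stores {dx1 - dx0, dy1 - dy0, dx0, dy0}, i.e. {1.0f, ..., -0.5f, ...} for this x range.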

◆ Make()

GR_NORETAIN_BEGIN std::unique_ptr< GrGpu > GrMtlGpu::Make ( const GrMtlBackendContext &  context,
const GrContextOptions &  options,
GrDirectContext *  direct 
)
static

Definition at line 56 of file GrMtlGpu.mm.

58 {
59 if (!context.fDevice || !context.fQueue) {
60 return nullptr;
61 }
62 if (@available(macOS 10.14, iOS 10.0, tvOS 10.0, *)) {
63 // no warning needed
64 } else {
65 SkDebugf("*** Error ***: Skia's Metal backend no longer supports this OS version.\n");
66#ifdef SK_BUILD_FOR_IOS
67 SkDebugf("Minimum supported version is iOS 10.0.\n");
68#else
69 SkDebugf("Minimum supported version is MacOS 10.14.\n");
70#endif
71 return nullptr;
72 }
73
74 id<MTLDevice> GR_NORETAIN device = (__bridge id<MTLDevice>)(context.fDevice.get());
75 id<MTLCommandQueue> GR_NORETAIN queue = (__bridge id<MTLCommandQueue>)(context.fQueue.get());
76
77 return std::unique_ptr<GrGpu>(new GrMtlGpu(direct,
78 options,
79 device,
80 queue));
81}
const char * options
void SK_SPI SkDebugf(const char format[],...) SK_PRINTF_LIKE(1
id< MTLDevice > device() const
Definition GrMtlGpu.h:49
sk_cfp< GrMTLHandle > fDevice
sk_cfp< GrMTLHandle > fQueue
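
A hedged sketch of calling Make() with an existing device and queue. The fDevice/fQueue field names and the bridging direction come from the body above; sk_cfp::retain() is assumed to be the helper used to adopt the Objective-C handles into the backend context.

// Sketch only: wrap an MTLDevice/MTLCommandQueue pair in a GrMtlBackendContext
// and hand it to GrMtlGpu::Make().
std::unique_ptr<GrGpu> makeMtlGpu(id<MTLDevice> device,
                                  id<MTLCommandQueue> queue,
                                  const GrContextOptions& options,
                                  GrDirectContext* direct) {
    GrMtlBackendContext backendContext = {};
    backendContext.fDevice.retain((__bridge GrMTLHandle)device);   // assumed sk_cfp helper
    backendContext.fQueue.retain((__bridge GrMTLHandle)queue);
    return GrMtlGpu::Make(backendContext, options, direct);        // nullptr on unsupported OS versions
}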

◆ makeMSAAAttachment()

sk_sp< GrAttachment > GrMtlGpu::makeMSAAAttachment ( SkISize  dimensions,
const GrBackendFormat &  format,
int  numSamples,
GrProtected  isProtected,
GrMemoryless  isMemoryless 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 536 of file GrMtlGpu.mm.

540 {
541 // Metal doesn't support protected textures
542 SkASSERT(isProtected == GrProtected::kNo);
543 // TODO: add memoryless support
544 SkASSERT(isMemoryless == GrMemoryless::kNo);
545
546 MTLPixelFormat pixelFormat = (MTLPixelFormat)GrBackendFormats::AsMtlFormat(format);
547 SkASSERT(pixelFormat != MTLPixelFormatInvalid);
549 SkASSERT(this->mtlCaps().isFormatRenderable(pixelFormat, numSamples));
550
552 return GrMtlAttachment::MakeMSAA(this, dimensions, numSamples, pixelFormat);
553}
void incMSAAAttachmentCreates()
Definition GrGpu.h:540
Stats fStats
Definition GrGpu.h:703
static sk_sp< GrMtlAttachment > MakeMSAA(GrMtlGpu *gpu, SkISize dimensions, int sampleCnt, MTLPixelFormat format)
uint32_t uint32_t * format
SK_API GrMTLPixelFormat AsMtlFormat(const GrBackendFormat &)
bool MtlFormatIsCompressed(MTLPixelFormat mtlFormat)
Definition MtlUtils.mm:49

◆ makeSemaphore()

std::unique_ptr< GrSemaphore > GrMtlGpu::makeSemaphore ( bool  isOwned)
overridevirtual

Implements GrGpu.

Definition at line 1598 of file GrMtlGpu.mm.

1598 {
1599 SkASSERT(this->caps()->semaphoreSupport());
1600 return GrMtlSemaphore::Make(this);
1601}
static std::unique_ptr< GrMtlSemaphore > Make(GrMtlGpu *gpu)

◆ makeStencilAttachment()

sk_sp< GrAttachment > GrMtlGpu::makeStencilAttachment ( const GrBackendFormat &,
SkISize  dimensions,
int  numStencilSamples 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 528 of file GrMtlGpu.mm.

529 {
530 MTLPixelFormat sFmt = this->mtlCaps().preferredStencilFormat();
531
533 return GrMtlAttachment::GrMtlAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
534}
void incStencilAttachmentCreates()
Definition GrGpu.h:539
MTLPixelFormat preferredStencilFormat() const
Definition GrMtlCaps.h:57

◆ mtlCaps()

const GrMtlCaps & GrMtlGpu::mtlCaps ( ) const
inline

Definition at line 47 of file GrMtlGpu.h.

47{ return *fMtlCaps; }

◆ onClearBackendTexture()

bool GrMtlGpu::onClearBackendTexture ( const GrBackendTexture &  backendTexture,
sk_sp< skgpu::RefCntedCallback >  finishedCallback,
std::array< float, 4 >  color 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 939 of file GrMtlGpu.mm.

941 {
942 GrMtlTextureInfo info;
944
945 id<MTLTexture> GR_NORETAIN mtlTexture = GrGetMTLTexture(info.fTexture.get());
946
947 const MTLPixelFormat mtlFormat = mtlTexture.pixelFormat;
948
949 // Create a transfer buffer and fill with data.
950 size_t bytesPerPixel = skgpu::MtlFormatBytesPerBlock(mtlFormat);
951 size_t combinedBufferSize;
952
953 // Reuse the same buffer for all levels. Should be ok since we made the row bytes tight.
954 combinedBufferSize = bytesPerPixel*backendTexture.width()*backendTexture.height();
955
956 size_t alignment = std::max(bytesPerPixel, this->mtlCaps().getMinBufferAlignment());
957 GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
958 combinedBufferSize, alignment);
959 if (!slice.fBuffer) {
960 return false;
961 }
962 char* buffer = (char*)slice.fOffsetMapPtr;
963
966 return false;
967 }
968 GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
969 auto rb = ii.minRowBytes();
970 SkASSERT(rb == bytesPerPixel*backendTexture.width());
971 if (!GrClearImage(ii, buffer, rb, color)) {
972 return false;
973 }
974
975 // Transfer buffer contents to texture
976 MTLOrigin origin = MTLOriginMake(0, 0, 0);
977
978 GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
979 id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
980 if (!blitCmdEncoder) {
981 return false;
982 }
983#ifdef SK_ENABLE_MTL_DEBUG_INFO
984 [blitCmdEncoder pushDebugGroup:@"onClearBackendTexture"];
985#endif
986 GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
987
988 SkISize levelDimensions(backendTexture.dimensions());
989 int numMipLevels = mtlTexture.mipmapLevelCount;
990 for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
991 size_t levelRowBytes;
992 size_t levelSize;
993
994 levelRowBytes = levelDimensions.width() * bytesPerPixel;
995 levelSize = levelRowBytes * levelDimensions.height();
996
997 // TODO: can this all be done in one go?
998 [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
999 sourceOffset: slice.fOffset
1000 sourceBytesPerRow: levelRowBytes
1001 sourceBytesPerImage: levelSize
1002 sourceSize: MTLSizeMake(levelDimensions.width(),
1003 levelDimensions.height(),
1004 1)
1005 toTexture: mtlTexture
1006 destinationSlice: 0
1007 destinationLevel: currentMipLevel
1008 destinationOrigin: origin];
1009
1010 levelDimensions = {std::max(1, levelDimensions.width() / 2),
1011 std::max(1, levelDimensions.height() / 2)};
1012 }
1013#ifdef SK_BUILD_FOR_MAC
1014 if (this->mtlCaps().isMac()) {
1015 [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, combinedBufferSize)];
1016 }
1017#endif
1018 [blitCmdEncoder popDebugGroup];
1019
1020 if (finishedCallback) {
1021 this->addFinishedCallback(std::move(finishedCallback));
1022 }
1023
1024 return true;
1025}
bool GrClearImage(const GrImageInfo &dstInfo, void *dst, size_t dstRB, std::array< float, 4 > color)
static GrColorType mtl_format_to_backend_tex_clear_colortype(MTLPixelFormat format)
Definition GrMtlGpu.mm:834
GR_NORETAIN_BEGIN SK_ALWAYS_INLINE id< MTLTexture > GrGetMTLTexture(const void *mtlTexture)
Definition GrMtlUtil.h:36
SkColor4f color
kUnpremul_SkAlphaType
#define SkAssertResult(cond)
Definition SkAssert.h:123
static SkColorType colorType(AImageDecoder *decoder, const AImageDecoderHeaderInfo *headerInfo)
SkISize dimensions() const
id< MTLBuffer > mtlBuffer() const
Definition GrMtlBuffer.h:28
id< MTLBlitCommandEncoder > getBlitCommandEncoder()
Slice allocateStagingBufferSlice(size_t size, size_t requiredAlignment=1)
SK_API bool GetMtlTextureInfo(const GrBackendTexture &, GrMtlTextureInfo *)
size_t MtlFormatBytesPerBlock(MTLPixelFormat mtlFormat)
Definition MtlUtils.mm:119
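
The per-level loop above halves the dimensions, clamped to 1, until the smallest mip is reached; a 16x8 base level, for example, visits 16x8, 8x4, 4x2, 2x1 and 1x1 (five levels in total).

// Illustrative: one step of the halving rule used in the loop above.
SkISize levelDimensions = {16, 8};
levelDimensions = {std::max(1, levelDimensions.width() / 2),    // 8
                   std::max(1, levelDimensions.height() / 2)};  // 4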

◆ onCopySurface()

bool GrMtlGpu::onCopySurface ( GrSurface *  dst,
const SkIRect &  dstRect,
GrSurface *  src,
const SkIRect &  srcRect,
GrSamplerState::Filter   
)
overridevirtual

Implements GrGpu.

Definition at line 1265 of file GrMtlGpu.mm.

1267 {
1268 SkASSERT(!src->isProtected() && !dst->isProtected());
1269
1270 if (srcRect.size() != dstRect.size()) {
1271 return false;
1272 }
1273
1274 GrMtlAttachment* dstAttachment;
1275 GrMtlAttachment* srcAttachment;
1276 GrRenderTarget* dstRT = dst->asRenderTarget();
1277 if (dstRT) {
1278 GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
1279 // This will technically return true for single sample rts that used DMSAA in which case we
1280 // don't have to pick the resolve attachment. But in that case the resolve and color
1281 // attachments will be the same anyways.
1282 if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
1283 dstAttachment = mtlRT->resolveAttachment();
1284 } else {
1285 dstAttachment = mtlRT->colorAttachment();
1286 }
1287 } else if (dst->asTexture()) {
1288 dstAttachment = static_cast<GrMtlTexture*>(dst->asTexture())->attachment();
1289 } else {
1290 // The surface in a GrAttachment already
1291 dstAttachment = static_cast<GrMtlAttachment*>(dst);
1292 }
1293 GrRenderTarget* srcRT = src->asRenderTarget();
1294 if (srcRT) {
1295 GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(srcRT);
1296 // This will technically return true for single sample rts that used DMSAA in which case we
1297 // don't have to pick the resolve attachment. But in that case the resolve and color
1298 // attachments will be the same anyways.
1299 if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
1300 srcAttachment = mtlRT->resolveAttachment();
1301 } else {
1302 srcAttachment = mtlRT->colorAttachment();
1303 }
1304 } else if (src->asTexture()) {
1305 SkASSERT(src->asTexture());
1306 srcAttachment = static_cast<GrMtlTexture*>(src->asTexture())->attachment();
1307 } else {
1308 // The surface in a GrAttachment already
1309 srcAttachment = static_cast<GrMtlAttachment*>(src);
1310 }
1311
1312 MTLPixelFormat dstFormat = dstAttachment->mtlFormat();
1313 MTLPixelFormat srcFormat = srcAttachment->mtlFormat();
1314
1315 int dstSampleCnt = dstAttachment->sampleCount();
1316 int srcSampleCnt = srcAttachment->sampleCount();
1317
1318 const SkIPoint dstPoint = dstRect.topLeft();
1319 if (this->mtlCaps().canCopyAsResolve(dstFormat, dstSampleCnt,
1320 srcFormat, srcSampleCnt,
1321 SkToBool(srcRT), src->dimensions(),
1322 srcRect, dstPoint,
1323 dstAttachment == srcAttachment)) {
1324 this->copySurfaceAsResolve(dst, src);
1325 return true;
1326 }
1327
1328 if (srcAttachment->framebufferOnly() || dstAttachment->framebufferOnly()) {
1329 return false;
1330 }
1331
1332 if (this->mtlCaps().canCopyAsBlit(dstFormat, dstSampleCnt, srcFormat, srcSampleCnt,
1333 srcRect, dstPoint, dstAttachment == srcAttachment)) {
1334 this->copySurfaceAsBlit(dst, src, dstAttachment, srcAttachment, srcRect, dstPoint);
1335 return true;
1336 }
1337
1338 return false;
1339}
static constexpr bool SkToBool(const T &x)
Definition SkTo.h:35
bool framebufferOnly() const
unsigned int sampleCount() const
bool canCopyAsResolve(MTLPixelFormat dstFormat, int dstSampleCount, MTLPixelFormat srcFormat, int srcSampleCount, bool srcIsRenderTarget, const SkISize srcDimensions, const SkIRect &srcRect, const SkIPoint &dstPoint, bool areDstSrcSameObj) const
Definition GrMtlCaps.mm:288
bool renderTargetSupportsDiscardableMSAA(const GrMtlRenderTarget *) const
void copySurfaceAsBlit(GrSurface *dst, GrSurface *src, GrMtlAttachment *dstAttachment, GrMtlAttachment *srcAttachment, const SkIRect &srcRect, const SkIPoint &dstPoint)
Definition GrMtlGpu.mm:1230
void copySurfaceAsResolve(GrSurface *dst, GrSurface *src)
Definition GrMtlGpu.mm:1214
GrMtlAttachment * resolveAttachment() const
constexpr SkISize size() const
Definition SkRect.h:172
constexpr SkIPoint topLeft() const
Definition SkRect.h:151

◆ onCreateBackendTexture()

GrBackendTexture GrMtlGpu::onCreateBackendTexture ( SkISize  dimensions,
const GrBackendFormat &  format,
GrRenderable  renderable,
skgpu::Mipmapped  mipmapped,
GrProtected  isProtected,
std::string_view  label 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 922 of file GrMtlGpu.mm.

927 {
928 const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);
929
930 GrMtlTextureInfo info;
931 if (!this->createMtlTextureForBackendSurface(mtlFormat, dimensions, 1, GrTexturable::kYes,
932 renderable, mipmapped, &info)) {
933 return {};
934 }
935
936 return GrBackendTextures::MakeMtl(dimensions.width(), dimensions.height(), mipmapped, info);
937}
static MTLPixelFormat GrBackendFormatAsMTLPixelFormat(const GrBackendFormat &format)
Definition GrMtlUtil.h:106
SK_API GrBackendTexture MakeMtl(int width, int height, skgpu::Mipmapped, const GrMtlTextureInfo &mtlInfo, std::string_view label={})
constexpr int32_t width() const
Definition SkSize.h:36
constexpr int32_t height() const
Definition SkSize.h:37

◆ onCreateBuffer()

sk_sp< GrGpuBuffer > GrMtlGpu::onCreateBuffer ( size_t  size,
GrGpuBufferType  type,
GrAccessPattern  accessPattern 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 305 of file GrMtlGpu.mm.

307 {
308 return GrMtlBuffer::Make(this, size, type, accessPattern);
309}
static sk_sp< GrMtlBuffer > Make(GrMtlGpu *, size_t size, GrGpuBufferType intendedType, GrAccessPattern)

◆ onCreateCompressedBackendTexture()

GrBackendTexture GrMtlGpu::onCreateCompressedBackendTexture ( SkISize  dimensions,
const GrBackendFormat &  format,
skgpu::Mipmapped  mipmapped,
GrProtected  isProtected 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 1027 of file GrMtlGpu.mm.

1030 {
1031 const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);
1032
1033 GrMtlTextureInfo info;
1034 if (!this->createMtlTextureForBackendSurface(mtlFormat, dimensions, 1, GrTexturable::kYes,
1035 GrRenderable::kNo, mipmapped, &info)) {
1036 return {};
1037 }
1038
1039 return GrBackendTextures::MakeMtl(dimensions.width(), dimensions.height(), mipmapped, info);
1040}

◆ onCreateCompressedTexture()

sk_sp< GrTexture > GrMtlGpu::onCreateCompressedTexture ( SkISize  dimensions,
const GrBackendFormat &  format,
skgpu::Budgeted  budgeted,
skgpu::Mipmapped  mipmapped,
GrProtected  isProtected,
const void *  data,
size_t  dataSize 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 599 of file GrMtlGpu.mm.

605 {
606 // We don't support protected textures in Metal.
607 if (isProtected == GrProtected::kYes) {
608 return nullptr;
609 }
610
611 SkASSERT(this->caps()->isFormatTexturable(format, GrTextureType::k2D));
612 SkASSERT(data);
613
614 if (!check_max_blit_width(dimensions.width())) {
615 return nullptr;
616 }
617
618 MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
619 SkASSERT(this->caps()->isFormatCompressed(format));
620
621 int numMipLevels = 1;
622 if (mipmapped == skgpu::Mipmapped::kYes) {
623 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
624 }
625
626 GrMipmapStatus mipmapStatus = (mipmapped == skgpu::Mipmapped::kYes)
629
630 auto tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, mtlPixelFormat,
631 numMipLevels, mipmapStatus,
632 /*label=*/"MtlGpu_CreateCompressedTexture");
633 if (!tex) {
634 return nullptr;
635 }
636
637 // Upload to texture
638 id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
639 SkASSERT(mtlTexture);
640
641 auto compressionType = GrBackendFormatToCompressionType(format);
642 SkASSERT(compressionType != SkTextureCompressionType::kNone);
643
644 TArray<size_t> individualMipOffsets(numMipLevels);
645 SkDEBUGCODE(size_t combinedBufferSize =)
646 SkCompressedDataSize(compressionType,
647 dimensions,
648 &individualMipOffsets,
649 mipmapped == skgpu::Mipmapped::kYes);
650 SkASSERT(individualMipOffsets.size() == numMipLevels);
651 SkASSERT(dataSize == combinedBufferSize);
652
653 // offset value must be a multiple of the destination texture's pixel size in bytes
654 // for compressed textures, this is the block size
655 size_t alignment = SkCompressedBlockSize(compressionType);
656 GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
657 dataSize, alignment);
658 if (!slice.fBuffer) {
659 return nullptr;
660 }
661 char* bufferData = (char*)slice.fOffsetMapPtr;
662 GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
663
664 MTLOrigin origin = MTLOriginMake(0, 0, 0);
665
666 auto cmdBuffer = this->commandBuffer();
667 id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
668 if (!blitCmdEncoder) {
669 return nullptr;
670 }
671#ifdef SK_ENABLE_MTL_DEBUG_INFO
672 [blitCmdEncoder pushDebugGroup:@"onCreateCompressedTexture"];
673#endif
674
675 // copy data into the buffer, skipping any trailing bytes
676 memcpy(bufferData, data, dataSize);
677
678 SkISize levelDimensions = dimensions;
679 for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
680 const size_t levelRowBytes = skgpu::CompressedRowBytes(compressionType,
681 levelDimensions.width());
682 size_t levelSize = SkCompressedDataSize(compressionType, levelDimensions, nullptr, false);
683
684 // TODO: can this all be done in one go?
685 [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
686 sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
687 sourceBytesPerRow: levelRowBytes
688 sourceBytesPerImage: levelSize
689 sourceSize: MTLSizeMake(levelDimensions.width(),
690 levelDimensions.height(), 1)
691 toTexture: mtlTexture
692 destinationSlice: 0
693 destinationLevel: currentMipLevel
694 destinationOrigin: origin];
695
696 levelDimensions = {std::max(1, levelDimensions.width() /2),
697 std::max(1, levelDimensions.height()/2)};
698 }
699#ifdef SK_BUILD_FOR_MAC
700 if (this->mtlCaps().isMac()) {
701 [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, dataSize)];
702 }
703#endif
704#ifdef SK_ENABLE_MTL_DEBUG_INFO
705 [blitCmdEncoder popDebugGroup];
706#endif
707
708 return std::move(tex);
709}
SkTextureCompressionType GrBackendFormatToCompressionType(const GrBackendFormat &format)
static bool check_max_blit_width(int widthInPixels)
Definition GrMtlGpu.mm:311
GrMipmapStatus
size_t SkCompressedDataSize(SkTextureCompressionType type, SkISize dimensions, TArray< size_t > *individualMipOffsets, bool mipmapped)
size_t SkCompressedBlockSize(SkTextureCompressionType type)
#define SkDEBUGCODE(...)
Definition SkDebug.h:23
@ kYes
Do pre-clip the geometry before applying the (perspective) matrix.
static sk_sp< GrMtlTexture > MakeNewTexture(GrMtlGpu *, skgpu::Budgeted budgeted, SkISize dimensions, MTLPixelFormat format, uint32_t mipLevels, GrMipmapStatus, std::string_view label)
static int ComputeLevelCount(int baseWidth, int baseHeight)
Definition SkMipmap.cpp:134
size_t CompressedRowBytes(SkTextureCompressionType type, int width)
Mipmapped
Definition GpuTypes.h:53
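
For compressed formats the staging math above works in 4x4 blocks rather than pixels. As a hedged example, assuming a BC1-compressed level (8 bytes per 4x4 block) that is 8x8 texels:

// Illustrative block math (assumption: BC1, 4x4 blocks, 8 bytes per block).
size_t blockBytes    = 8;
size_t blocksPerRow  = 8 / 4;                       // width 8 -> 2 blocks
size_t levelRowBytes = blocksPerRow * blockBytes;   // 16 bytes, cf. skgpu::CompressedRowBytes()
size_t levelSize     = levelRowBytes * (8 / 4);     // 32 bytes, cf. SkCompressedDataSize()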

◆ onCreateTexture()

sk_sp< GrTexture > GrMtlGpu::onCreateTexture ( SkISize  dimensions,
const GrBackendFormat &  format,
GrRenderable  renderable,
int  renderTargetSampleCnt,
skgpu::Budgeted  budgeted,
GrProtected  isProtected,
int  mipLevelCount,
uint32_t  levelClearMask,
std::string_view  label 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 555 of file GrMtlGpu.mm.

563 {
564 // We don't support protected textures in Metal.
565 if (isProtected == GrProtected::kYes) {
566 return nullptr;
567 }
568 SkASSERT(mipLevelCount > 0);
569
570 MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
571 SkASSERT(mtlPixelFormat != MTLPixelFormatInvalid);
572 SkASSERT(!this->caps()->isFormatCompressed(format));
573
575 GrMipmapStatus mipmapStatus =
577 if (renderable == GrRenderable::kYes) {
579 this, budgeted, dimensions, renderTargetSampleCnt, mtlPixelFormat, mipLevelCount,
580 mipmapStatus, label);
581 } else {
582 tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, mtlPixelFormat,
583 mipLevelCount, mipmapStatus, label);
584 }
585
586 if (!tex) {
587 return nullptr;
588 }
589
590 if (levelClearMask) {
591 this->clearTexture(tex.get(),
592 skgpu::MtlFormatBytesPerBlock(mtlPixelFormat),
593 levelClearMask);
594 }
595
596 return std::move(tex);
597}
static sk_sp< GrMtlTextureRenderTarget > MakeNewTextureRenderTarget(GrMtlGpu *, skgpu::Budgeted, SkISize, int sampleCnt, MTLPixelFormat, uint32_t mipLevels, GrMipmapStatus, std::string_view label)

◆ onGetOpsRenderPass()

GrOpsRenderPass * GrMtlGpu::onGetOpsRenderPass ( GrRenderTarget *  renderTarget,
bool  useMSAASurface,
GrAttachment *  stencil,
GrSurfaceOrigin  origin,
const SkIRect &  bounds,
const GrOpsRenderPass::LoadAndStoreInfo &  colorInfo,
const GrOpsRenderPass::StencilLoadAndStoreInfo &  stencilInfo,
const skia_private::TArray< GrSurfaceProxy *, true > &  sampledProxies,
GrXferBarrierFlags  renderPassXferBarriers 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 153 of file GrMtlGpu.mm.

159 {
160 // For the given render target and requested render pass features we need to find a compatible
161 // framebuffer to use.
162 GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(renderTarget);
163
164 // TODO: support DMSAA
165 SkASSERT(!useMSAASurface ||
166 (renderTarget->numSamples() > 1));
167
168 bool withResolve = false;
169
170 // Figure out if we can use a Resolve store action for this render pass. When we set up
171 // the render pass we'll update the color load/store ops since we don't want to ever load
172 // or store the msaa color attachment, but may need to for the resolve attachment.
173 if (useMSAASurface && this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
174 withResolve = true;
175 }
176
177 sk_sp<GrMtlFramebuffer> framebuffer =
178 sk_ref_sp(mtlRT->getFramebuffer(withResolve, SkToBool(stencil)));
179 if (!framebuffer) {
180 return nullptr;
181 }
182
183 return new GrMtlOpsRenderPass(this, renderTarget, std::move(framebuffer), origin, colorInfo,
184 stencilInfo);
185}
const GrMtlFramebuffer * getFramebuffer(bool withResolve, bool withStencil)
int numSamples() const

◆ onReadPixels()

bool GrMtlGpu::onReadPixels ( GrSurface *  surface,
SkIRect  rect,
GrColorType  surfaceColorType,
GrColorType  bufferColorType,
void *  buffer,
size_t  rowBytes 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 1365 of file GrMtlGpu.mm.

1370 {
1372
1373 if (surfaceColorType != dstColorType) {
1374 return false;
1375 }
1376
1377 int bpp = GrColorTypeBytesPerPixel(dstColorType);
1378 size_t transBufferRowBytes = bpp*rect.width();
1379 size_t transBufferImageBytes = transBufferRowBytes*rect.height();
1380
1382 sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
1383 transBufferImageBytes,
1387
1388 if (!transferBuffer) {
1389 return false;
1390 }
1391
1392 GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
1393 if (!this->readOrTransferPixels(surface,
1394 rect,
1395 dstColorType,
1396 grMtlBuffer->mtlBuffer(),
1397 0,
1398 transBufferImageBytes,
1399 transBufferRowBytes)) {
1400 return false;
1401 }
1402 this->submitCommandBuffer(kForce_SyncQueue);
1403
1404 const void* mappedMemory = grMtlBuffer->mtlBuffer().contents;
1405
1407 rowBytes,
1408 mappedMemory,
1409 transBufferRowBytes,
1410 transBufferRowBytes,
1411 rect.height());
1412
1413 return true;
1414}
static constexpr size_t GrColorTypeBytesPerPixel(GrColorType ct)
@ kDynamic_GrAccessPattern
static void SkRectMemcpy(void *dst, size_t dstRB, const void *src, size_t srcRB, size_t trimRowBytes, int rowCount)
GrResourceProvider * resourceProvider()
GrDirectContextPriv priv()
GrDirectContext * getContext()
Definition GrGpu.h:67

◆ onRegenerateMipMapLevels()

bool GrMtlGpu::onRegenerateMipMapLevels ( GrTexture *  texture)
overrideprivatevirtual

Implements GrGpu.

Definition at line 811 of file GrMtlGpu.mm.

811 {
812 GrMtlTexture* grMtlTexture = static_cast<GrMtlTexture*>(texture);
813 id<MTLTexture> GR_NORETAIN mtlTexture = grMtlTexture->mtlTexture();
814
815 // Automatic mipmap generation is only supported by color-renderable formats
816 if (!fMtlCaps->isFormatRenderable(mtlTexture.pixelFormat, 1) &&
817 // We have pixel configs marked as textureable-only that use RGBA8 as the internal format
818 MTLPixelFormatRGBA8Unorm != mtlTexture.pixelFormat) {
819 return false;
820 }
821
822 auto cmdBuffer = this->commandBuffer();
823 id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
824 if (!blitCmdEncoder) {
825 return false;
826 }
827 [blitCmdEncoder generateMipmapsForTexture: mtlTexture];
828 this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(grMtlTexture->attachment()));
829
830 return true;
831}
bool isFormatRenderable(const GrBackendFormat &format, int sampleCount) const override
Definition GrMtlCaps.mm:490
id< MTLTexture > mtlTexture() const
GrMtlAttachment * attachment() const

◆ onResolveRenderTarget()

void GrMtlGpu::onResolveRenderTarget ( GrRenderTarget *  target,
const SkIRect &  resolveRect 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 1629 of file GrMtlGpu.mm.

1629 {
1630 SkASSERT(target->numSamples() > 1);
1631 GrMtlRenderTarget* rt = static_cast<GrMtlRenderTarget*>(target);
1632
1634 // We would have resolved the RT during the render pass.
1635 return;
1636 }
1637
1638 this->resolve(static_cast<GrMtlRenderTarget*>(target)->resolveAttachment(),
1639 static_cast<GrMtlRenderTarget*>(target)->colorAttachment());
1640}

◆ onSubmitToGpu()

bool GrMtlGpu::onSubmitToGpu ( GrSyncCpu  sync)
overrideprivatevirtual

Implements GrGpu.

Definition at line 292 of file GrMtlGpu.mm.

292 {
293 if (sync == GrSyncCpu::kYes) {
294 return this->submitCommandBuffer(kForce_SyncQueue);
295 } else {
296 return this->submitCommandBuffer(kSkip_SyncQueue);
297 }
298}

◆ onTransferFromBufferToBuffer()

bool GrMtlGpu::onTransferFromBufferToBuffer ( sk_sp< GrGpuBuffer >  src,
size_t  srcOffset,
sk_sp< GrGpuBuffer >  dst,
size_t  dstOffset,
size_t  size 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 1416 of file GrMtlGpu.mm.

1420 {
1421 id<MTLBuffer> GR_NORETAIN mtlSrc = static_cast<GrMtlBuffer*>(src.get())->mtlBuffer();
1422 id<MTLBuffer> GR_NORETAIN mtlDst = static_cast<GrMtlBuffer*>(dst.get())->mtlBuffer();
1423 SkASSERT(mtlSrc);
1424 SkASSERT(mtlDst);
1425
1426 auto cmdBuffer = this->commandBuffer();
1427 id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
1428 if (!blitCmdEncoder) {
1429 return false;
1430 }
1431
1432#ifdef SK_ENABLE_MTL_DEBUG_INFO
1433 [blitCmdEncoder pushDebugGroup:@"onTransferFromBufferToBuffer"];
1434#endif
1435 [blitCmdEncoder copyFromBuffer: mtlSrc
1436 sourceOffset: srcOffset
1437 toBuffer: mtlDst
1438 destinationOffset: dstOffset
1439 size: size];
1440#ifdef SK_ENABLE_MTL_DEBUG_INFO
1441 [blitCmdEncoder popDebugGroup];
1442#endif
1443
1444 cmdBuffer->addGrBuffer(std::move(src));
1445 cmdBuffer->addGrBuffer(std::move(dst));
1446
1447 return true;
1448}

◆ onTransferPixelsFrom()

bool GrMtlGpu::onTransferPixelsFrom ( GrSurface *  surface,
SkIRect  rect,
GrColorType  surfaceColorType,
GrColorType  bufferColorType,
sk_sp< GrGpuBuffer >  transferBuffer,
size_t  offset 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 1505 of file GrMtlGpu.mm.

1510 {
1512 SkASSERT(transferBuffer);
1513
1514 if (surfaceColorType != bufferColorType) {
1515 return false;
1516 }
1517
1518 // Metal only supports offsets that are aligned to a pixel.
1519 size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
1520 if (offset % bpp) {
1521 return false;
1522 }
1523 if (GrBackendFormatBytesPerPixel(surface->backendFormat()) != bpp) {
1524 return false;
1525 }
1526
1527 GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
1528
1529 size_t transBufferRowBytes = bpp*rect.width();
1530 size_t transBufferImageBytes = transBufferRowBytes*rect.height();
1531
1532 return this->readOrTransferPixels(surface,
1533 rect,
1534 bufferColorType,
1535 grMtlBuffer->mtlBuffer(),
1536 offset,
1537 transBufferImageBytes,
1538 transBufferRowBytes);
1539}
size_t GrBackendFormatBytesPerPixel(const GrBackendFormat &format)
Point offset

◆ onTransferPixelsTo()

bool GrMtlGpu::onTransferPixelsTo ( GrTexture *  texture,
SkIRect  rect,
GrColorType  textureColorType,
GrColorType  bufferColorType,
sk_sp< GrGpuBuffer >  transferBuffer,
size_t  offset,
size_t  rowBytes 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 1450 of file GrMtlGpu.mm.

1456 {
1458 SkASSERT(transferBuffer);
1459 if (textureColorType != bufferColorType) {
1460 return false;
1461 }
1462
1463 GrMtlTexture* grMtlTexture = static_cast<GrMtlTexture*>(texture);
1464 id<MTLTexture> GR_NORETAIN mtlTexture = grMtlTexture->mtlTexture();
1465 SkASSERT(mtlTexture);
1466
1467 GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
1468 id<MTLBuffer> GR_NORETAIN mtlBuffer = grMtlBuffer->mtlBuffer();
1469 SkASSERT(mtlBuffer);
1470
1471 size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
1472 if (offset % bpp) {
1473 return false;
1474 }
1475 if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
1476 return false;
1477 }
1478
1479 MTLOrigin origin = MTLOriginMake(rect.left(), rect.top(), 0);
1480
1481 auto cmdBuffer = this->commandBuffer();
1482 id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
1483 if (!blitCmdEncoder) {
1484 return false;
1485 }
1486#ifdef SK_ENABLE_MTL_DEBUG_INFO
1487 [blitCmdEncoder pushDebugGroup:@"onTransferPixelsTo"];
1488#endif
1489 [blitCmdEncoder copyFromBuffer: mtlBuffer
1490 sourceOffset: offset
1491 sourceBytesPerRow: rowBytes
1492 sourceBytesPerImage: rowBytes*rect.height()
1493 sourceSize: MTLSizeMake(rect.width(), rect.height(), 1)
1494 toTexture: mtlTexture
1495 destinationSlice: 0
1496 destinationLevel: 0
1497 destinationOrigin: origin];
1498#ifdef SK_ENABLE_MTL_DEBUG_INFO
1499 [blitCmdEncoder popDebugGroup];
1500#endif
1501
1502 return true;
1503}

◆ onUpdateCompressedBackendTexture()

bool GrMtlGpu::onUpdateCompressedBackendTexture ( const GrBackendTexture &  backendTexture,
sk_sp< skgpu::RefCntedCallback >  finishedCallback,
const void *  data,
size_t  size 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 1042 of file GrMtlGpu.mm.

1045 {
1046 GrMtlTextureInfo info;
1048
1049 id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());
1050
1051 int numMipLevels = mtlTexture.mipmapLevelCount;
1052 skgpu::Mipmapped mipmapped = numMipLevels > 1 ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo;
1053
1054 SkTextureCompressionType compression =
1057
1058 // Create a transfer buffer and fill with data.
1059 STArray<16, size_t> individualMipOffsets;
1060 size_t combinedBufferSize;
1061 combinedBufferSize = SkCompressedDataSize(compression,
1062 backendTexture.dimensions(),
1063 &individualMipOffsets,
1064 mipmapped == skgpu::Mipmapped::kYes);
1065 SkASSERT(individualMipOffsets.size() == numMipLevels);
1066
1067 size_t alignment = std::max(SkCompressedBlockSize(compression),
1068 this->mtlCaps().getMinBufferAlignment());
1070 fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
1071 if (!slice.fBuffer) {
1072 return false;
1073 }
1074 char* buffer = (char*)slice.fOffsetMapPtr;
1075
1076 memcpy(buffer, data, size);
1077
1078 // Transfer buffer contents to texture
1079 MTLOrigin origin = MTLOriginMake(0, 0, 0);
1080
1081 GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
1082 id<MTLBlitCommandEncoder> blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
1083 if (!blitCmdEncoder) {
1084 return false;
1085 }
1086#ifdef SK_ENABLE_MTL_DEBUG_INFO
1087 [blitCmdEncoder pushDebugGroup:@"onUpdateCompressedBackendTexture"];
1088#endif
1089 GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
1090
1091 SkISize levelDimensions(backendTexture.dimensions());
1092 for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
1093 size_t levelRowBytes;
1094 size_t levelSize;
1095
1096 levelRowBytes = skgpu::CompressedRowBytes(compression, levelDimensions.width());
1097 levelSize = SkCompressedDataSize(compression, levelDimensions, nullptr, false);
1098
1099 // TODO: can this all be done in one go?
1100 [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
1101 sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
1102 sourceBytesPerRow: levelRowBytes
1103 sourceBytesPerImage: levelSize
1104 sourceSize: MTLSizeMake(levelDimensions.width(),
1105 levelDimensions.height(),
1106 1)
1107 toTexture: mtlTexture
1108 destinationSlice: 0
1109 destinationLevel: currentMipLevel
1110 destinationOrigin: origin];
1111
1112 levelDimensions = {std::max(1, levelDimensions.width() / 2),
1113 std::max(1, levelDimensions.height() / 2)};
1114 }
1115#ifdef SK_BUILD_FOR_MAC
1116 if (this->mtlCaps().isMac()) {
1117 [mtlBuffer->mtlBuffer() didModifyRange:NSMakeRange(slice.fOffset, combinedBufferSize)];
1118 }
1119#endif
1120 [blitCmdEncoder popDebugGroup];
1121
1122 if (finishedCallback) {
1123 this->addFinishedCallback(std::move(finishedCallback));
1124 }
1125
1126 return true;
1127}
GrBackendFormat getBackendFormat() const
int size() const
Definition SkTArray.h:416

◆ onWrapBackendRenderTarget()

sk_sp< GrRenderTarget > GrMtlGpu::onWrapBackendRenderTarget ( const GrBackendRenderTarget &  backendRT)
overrideprivatevirtual

Implements GrGpu.

Definition at line 793 of file GrMtlGpu.mm.

{
    if (!this->caps()->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    id<MTLTexture> mtlTexture = get_texture_from_backend(backendRT);
    if (!mtlTexture) {
        return nullptr;
    }

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
    }

    return GrMtlRenderTarget::MakeWrappedRenderTarget(this, backendRT.dimensions(),
                                                      backendRT.sampleCnt(), mtlTexture);
}
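The SkASSERT above reflects a requirement on the caller: a texture wrapped as a render target must have been created with MTLTextureUsageRenderTarget. A minimal Objective-C++ sketch of creating such a texture; the device, dimensions, and pixel format are illustrative assumptions:

// Sketch: create an MTLTexture that is eligible for wrapping as a render target.
// 'device' is assumed to be an existing id<MTLDevice>.
MTLTextureDescriptor* desc =
        [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:MTLPixelFormatBGRA8Unorm
                                                           width:256
                                                          height:256
                                                       mipmapped:NO];
desc.usage = MTLTextureUsageRenderTarget | MTLTextureUsageShaderRead;
desc.storageMode = MTLStorageModePrivate;
id<MTLTexture> texture = [device newTextureWithDescriptor:desc];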

◆ onWrapBackendTexture()

sk_sp< GrTexture > GrMtlGpu::onWrapBackendTexture ( const GrBackendTexture &  backendTex,
GrWrapOwnership  ,
GrWrapCacheable  cacheable,
GrIOType  ioType 
)
override private virtual

Implements GrGpu.

Definition at line 729 of file GrMtlGpu.mm.

{
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from a MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), mtlTexture, cacheable,
                                            ioType);
}

◆ onWrapCompressedBackendTexture()

sk_sp< GrTexture > GrMtlGpu::onWrapCompressedBackendTexture ( const GrBackendTexture &  backendTex,
GrWrapOwnership  ,
GrWrapCacheable  cacheable 
)
override private virtual

Implements GrGpu.

Definition at line 746 of file GrMtlGpu.mm.

{
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from a MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), mtlTexture, cacheable,
                                            kRead_GrIOType);
}

◆ onWrapRenderableBackendTexture()

sk_sp< GrTexture > GrMtlGpu::onWrapRenderableBackendTexture ( const GrBackendTexture &  backendTex,
int  sampleCnt,
GrWrapOwnership  ,
GrWrapCacheable  cacheable 
)
override private virtual

Implements GrGpu.

Definition at line 762 of file GrMtlGpu.mm.

{
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from a MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    const GrMtlCaps& caps = this->mtlCaps();

    MTLPixelFormat format = mtlTexture.pixelFormat;
    if (!caps.isFormatRenderable(format, sampleCnt)) {
        return nullptr;
    }

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
    }

    sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, format);
    SkASSERT(sampleCnt);

    return GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(
            this, backendTex.dimensions(), sampleCnt, mtlTexture, cacheable);
}
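The getRenderTargetSampleCount() call above rounds the requested sample count to one the pixel format actually supports, returning 0 when the format cannot be rendered to at all. A standalone sketch of that negotiation, with the supported-count list as an illustrative assumption rather than the GrMtlCaps implementation:

#include <vector>

// Sketch only: round a requested MSAA sample count up to the nearest supported
// value; 0 means the request cannot be met for this format.
int roundUpSampleCount(int requested, const std::vector<int>& supportedCounts /* sorted ascending */) {
    for (int count : supportedCounts) {
        if (count >= requested) {
            return count;
        }
    }
    return 0;
}

// Usage: roundUpSampleCount(3, {1, 2, 4, 8}) == 4; roundUpSampleCount(16, {1, 2, 4, 8}) == 0.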

◆ onWritePixels()

bool GrMtlGpu::onWritePixels ( GrSurface *  surface,
SkIRect  rect,
GrColorType  surfaceColorType,
GrColorType  bufferColorType,
const GrMipLevel  texels[],
int  mipLevelCount,
bool  prepForTexSampling 
)
override private virtual

Implements GrGpu.

Definition at line 1341 of file GrMtlGpu.mm.

{
    GrMtlTexture* mtlTexture = static_cast<GrMtlTexture*>(surface->asTexture());
    // TODO: In principle we should be able to support pure rendertargets as well, but
    // until we find a use case we'll only support texture rendertargets.
    if (!mtlTexture) {
        return false;
    }
    if (!mipLevelCount) {
        return false;
    }
#ifdef SK_DEBUG
    for (int i = 0; i < mipLevelCount; i++) {
        SkASSERT(texels[i].fPixels);
    }
#endif
    return this->uploadToTexture(mtlTexture, rect, srcColorType, texels, mipLevelCount);
}
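The texels array passed in here carries one entry per mip level, each with a pixel pointer and row stride. A sketch of filling such an array for a two-level upload; the GrMipLevel fields used (fPixels, fRowBytes) are assumed from the Ganesh headers, and the backing buffers are placeholders:

#include <cstdint>
#include <vector>

// Sketch: describe a two-level RGBA8888 upload with tight row strides.
const int kWidth = 64, kHeight = 64, kBpp = 4;
std::vector<uint8_t> level0(kWidth * kHeight * kBpp);            // full resolution
std::vector<uint8_t> level1((kWidth / 2) * (kHeight / 2) * kBpp); // half resolution

GrMipLevel texels[2];
texels[0].fPixels   = level0.data();
texels[0].fRowBytes = kWidth * kBpp;
texels[1].fPixels   = level1.data();
texels[1].fRowBytes = (kWidth / 2) * kBpp;

// The non-virtual GrGpu::writePixels() entry point would then route these
// levels to onWritePixels(); the call below is illustrative only:
// gpu->writePixels(surface, SkIRect::MakeWH(kWidth, kHeight),
//                  surfaceColorType, srcColorType, texels, /*mipLevelCount=*/2);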

◆ pipelineBuilder()

GrThreadSafePipelineBuilder * GrMtlGpu::pipelineBuilder ( )
override virtual

Implements GrGpu.

Definition at line 122 of file GrMtlGpu.mm.

{
    return nullptr;
}

◆ precompileShader()

bool GrMtlGpu::precompileShader ( const SkData &  key,
const SkData &  data 
)
override virtual

Reimplemented from GrGpu.

Definition at line 1147 of file GrMtlGpu.mm.

{
    return this->resourceProvider().precompileShader(key, data);
}
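This hook is typically fed from a persistent pipeline cache captured on an earlier run. A sketch of pre-warming from previously saved key/data blobs; the file names are hypothetical, and real keys and data would normally come from a GrContextOptions persistent cache implementation:

#include "include/core/SkData.h"

// Sketch: replay one persisted shader-cache entry through precompileShader().
// 'gpu' is assumed to be an existing GrMtlGpu*.
static bool prewarmOneShader(GrMtlGpu* gpu) {
    sk_sp<SkData> key  = SkData::MakeFromFileName("cache/shader_000.key");
    sk_sp<SkData> data = SkData::MakeFromFileName("cache/shader_000.bin");
    if (!key || !data) {
        return false;
    }
    return gpu->precompileShader(*key, *data);  // false if the blob can't be reused
}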

◆ prepareTextureForCrossContextUsage()

std::unique_ptr< GrSemaphore > GrMtlGpu::prepareTextureForCrossContextUsage ( GrTexture *  )
override virtual

Put this texture in a safe and known state for use across multiple contexts. Depending on the backend, this may return a GrSemaphore. If so, other contexts should wait on that semaphore before using this texture.

Implements GrGpu.

Definition at line 300 of file GrMtlGpu.mm.

{
    this->submitToGpu(GrSyncCpu::kNo);
    return nullptr;
}
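On Metal this simply submits the pending work and returns no semaphore, but callers are written against the general contract described above: if a semaphore is returned, the consuming side must wait on it before using the texture. A simplified sketch of that contract at the GrGpu level; real callers go through GrDirectContext and backend semaphores, and producerGpu/consumerGpu/texture are illustrative names:

// Sketch of the cross-context handoff contract (names are illustrative).
std::unique_ptr<GrSemaphore> semaphore =
        producerGpu->prepareTextureForCrossContextUsage(texture);
if (semaphore) {
    // Only needed when the backend actually returned a semaphore; the Metal
    // backend above returns nullptr because it has already submitted its work.
    consumerGpu->waitSemaphore(semaphore.get());
}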

◆ refPipelineBuilder()

sk_sp< GrThreadSafePipelineBuilder > GrMtlGpu::refPipelineBuilder ( )
override virtual

Implements GrGpu.

Definition at line 126 of file GrMtlGpu.mm.

{
    return nullptr;
}

◆ resourceProvider()

GrMtlResourceProvider & GrMtlGpu::resourceProvider ( )
inline

Definition at line 51 of file GrMtlGpu.h.

{ return fResourceProvider; }

◆ stagingBufferManager()

GrStagingBufferManager * GrMtlGpu::stagingBufferManager ( )
inline override virtual

Reimplemented from GrGpu.

Definition at line 53 of file GrMtlGpu.h.

{ return &fStagingBufferManager; }

◆ submit()

void GrMtlGpu::submit ( GrOpsRenderPass *  renderPass)
override virtual

Implements GrGpu.

Definition at line 205 of file GrMtlGpu.mm.

{
    GrMtlOpsRenderPass* mtlRenderPass = reinterpret_cast<GrMtlOpsRenderPass*>(renderPass);
    mtlRenderPass->submit();
    delete renderPass;
}

◆ submitIndirectCommandBuffer()

void GrMtlGpu::submitIndirectCommandBuffer ( GrSurface *  surface,
GrSurfaceOrigin  origin,
const SkIRect *  bounds 
)
inline

Definition at line 113 of file GrMtlGpu.h.

{
    this->didWriteToSurface(surface, origin, bounds);
}
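The point of this helper is bookkeeping: after application-recorded Metal work has written into a Skia-owned surface, Skia needs to be told that the contents changed. A sketch of the call site; gpu and surface are assumed to exist, and the full-surface dirty rectangle and top-left origin are just example choices:

// Sketch: after encoding our own Metal commands that render into 'surface',
// report the written region so Skia invalidates dependent state (e.g. mipmaps).
SkIRect dirtyBounds = SkIRect::MakeWH(surface->width(), surface->height());
gpu->submitIndirectCommandBuffer(surface, kTopLeft_GrSurfaceOrigin, &dirtyBounds);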

◆ takeOwnershipOfBuffer()

void GrMtlGpu::takeOwnershipOfBuffer ( sk_sp< GrGpuBuffer >  buffer)
override private virtual

Reimplemented from GrGpu.

Definition at line 200 of file GrMtlGpu.mm.

{
    SkASSERT(buffer);
    this->commandBuffer()->addGrBuffer(std::move(buffer));
}

◆ uniformsRingBuffer()

GrRingBuffer * GrMtlGpu::uniformsRingBuffer ( )
inline override virtual

Reimplemented from GrGpu.

Definition at line 118 of file GrMtlGpu.h.

{ return &fUniformsRingBuffer; }

◆ waitSemaphore()

void GrMtlGpu::waitSemaphore ( GrSemaphore *  semaphore)
override virtual

Implements GrGpu.

Definition at line 1620 of file GrMtlGpu.mm.

{
    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
        SkASSERT(semaphore);
        GrMtlSemaphore* mtlSem = static_cast<GrMtlSemaphore*>(semaphore);

        this->commandBuffer()->encodeWaitForEvent(mtlSem->event(), mtlSem->value());
    }
}

◆ wrapBackendSemaphore()

std::unique_ptr< GrSemaphore > GrMtlGpu::wrapBackendSemaphore ( const GrBackendSemaphore &  semaphore,
GrSemaphoreWrapType  ,
GrWrapOwnership   
)
override virtual

Implements GrGpu.

Definition at line 1603 of file GrMtlGpu.mm.

{
    SkASSERT(this->caps()->backendSemaphoreSupport());
    return GrMtlSemaphore::MakeWrapped(GrBackendSemaphores::GetMtlHandle(semaphore),
                                       GrBackendSemaphores::GetMtlValue(semaphore));
}
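Taken together with waitSemaphore() above, this is how externally signaled MTLSharedEvents gate Skia's Metal work. A sketch of the pairing; backendSemaphore is assumed to already wrap a shared event at a target value, and the wrap-type and ownership enumerators are taken from Ganesh's private type headers:

// Sketch: make subsequent commands on this GrMtlGpu wait for an externally
// signaled event. 'gpu' and 'backendSemaphore' are assumed to exist already.
std::unique_ptr<GrSemaphore> semaphore = gpu->wrapBackendSemaphore(
        backendSemaphore, GrSemaphoreWrapType::kWillWait, kBorrow_GrWrapOwnership);
if (semaphore) {
    gpu->waitSemaphore(semaphore.get());  // encodes the wait on the current command buffer
}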

◆ xferBarrier()

void GrMtlGpu::xferBarrier ( GrRenderTarget * ,
GrXferBarrierType   
)
inline override private virtual

Implements GrGpu.

Definition at line 125 of file GrMtlGpu.h.

{}

The documentation for this class was generated from the following files: