GrDirectContextPriv.h
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDirectContextPriv_DEFINED
#define GrDirectContextPriv_DEFINED

#include "include/core/SkSpan.h"
#include "include/core/SkSurface.h"
#include "include/gpu/GrDirectContext.h"
#include "src/gpu/AtlasTypes.h"
#include "src/gpu/ganesh/GrRecordingContextPriv.h"

#include <memory>

class GrAtlasManager;
class GrBackendFormat;
class GrDeferredDisplayList;
class GrFragmentProcessor;
class GrGpu;
class GrImageInfo;
class GrMemoryPool;
class GrOnFlushCallbackObject;
class GrProgramDesc;
class GrProgramInfo;
class GrRenderTargetProxy;
class GrSemaphore;
class GrSurfaceProxy;

class SkTaskGroup;

/** Class that adds methods to GrDirectContext that are only intended for use internal to Skia.
    This class is purely a privileged window into GrDirectContext. It should never have additional
    data members or virtual methods. */
class GrDirectContextPriv : public GrRecordingContextPriv {
public:
    static sk_sp<GrDirectContext> Make(GrBackendApi backend,
                                       const GrContextOptions& options,
                                       sk_sp<GrContextThreadSafeProxy> proxy) {
        return sk_sp<GrDirectContext>(new GrDirectContext(backend, options, std::move(proxy)));
    }

    static bool Init(const sk_sp<GrDirectContext>& ctx) {
        SkASSERT(ctx);
        return ctx->init();
    }

    static void SetGpu(const sk_sp<GrDirectContext>& ctx, std::unique_ptr<GrGpu> gpu) {
        SkASSERT(ctx);
        ctx->fGpu = std::move(gpu);
    }

    GrDirectContext* context() { return static_cast<GrDirectContext*>(fContext); }
    const GrDirectContext* context() const { return static_cast<const GrDirectContext*>(fContext); }

    sktext::gpu::StrikeCache* getStrikeCache() { return this->context()->fStrikeCache.get(); }

    /**
     * Finalizes all pending reads and writes to the surfaces and also performs an MSAA resolve
     * if necessary. The GrSurfaceProxy array is treated as a hint. If it is supplied, the context
     * will guarantee that the draws required for those proxies are flushed, but it could do more.
     * If no array is provided then all current work will be flushed.
     *
     * It is not necessary to call this before reading the render target via Skia/GrContext.
     * GrContext will detect when it must perform a resolve before reading pixels back from the
     * surface or using it as a texture.
     */
    GrSemaphoresSubmitted flushSurfaces(
            SkSpan<GrSurfaceProxy*>,
            SkSurfaces::BackendSurfaceAccess = SkSurfaces::BackendSurfaceAccess::kNoAccess,
            const GrFlushInfo& = {},
            const skgpu::MutableTextureState* newState = nullptr);

    /** Version of above that flushes for a single proxy. Null is allowed. */
    GrSemaphoresSubmitted flushSurface(
            GrSurfaceProxy* proxy,
            SkSurfaces::BackendSurfaceAccess access = SkSurfaces::BackendSurfaceAccess::kNoAccess,
            const GrFlushInfo& info = {},
            const skgpu::MutableTextureState* newState = nullptr) {
        size_t size = proxy ? 1 : 0;
        return this->flushSurfaces({&proxy, size}, access, info, newState);
    }
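
    // Illustrative usage sketch (not part of this header's API): 'dContext' and 'proxy' below
    // are assumed to be an existing GrDirectContext* and a GrSurfaceProxy* obtained elsewhere;
    // flushing is followed by an explicit submit to the backend.
    //
    //   GrFlushInfo flushInfo;
    //   dContext->priv().flushSurface(proxy,
    //                                 SkSurfaces::BackendSurfaceAccess::kNoAccess,
    //                                 flushInfo);
    //   dContext->submit();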

    /**
     * Returns true if createPMToUPMEffect and createUPMToPMEffect will succeed. In other words,
     * did we find a pair of round-trip preserving conversion effects?
     */
    bool validPMUPMConversionExists();

    /**
     * These functions create premul <-> unpremul effects, using specialized round-trip effects.
     */
    std::unique_ptr<GrFragmentProcessor> createPMToUPMEffect(std::unique_ptr<GrFragmentProcessor>);
    std::unique_ptr<GrFragmentProcessor> createUPMToPMEffect(std::unique_ptr<GrFragmentProcessor>);
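
    // Illustrative sketch (assumed names: 'dContext' is a GrDirectContext*, 'fp' is a
    // std::unique_ptr<GrFragmentProcessor> built by the caller). The conversion effects should
    // only be used when a round-trip preserving pair is available:
    //
    //   if (dContext->priv().validPMUPMConversionExists()) {
    //       fp = dContext->priv().createPMToUPMEffect(std::move(fp));
    //       // ... sample in unpremul space, then restore premul:
    //       fp = dContext->priv().createUPMToPMEffect(std::move(fp));
    //   }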

    SkTaskGroup* getTaskGroup() { return this->context()->fTaskGroup.get(); }

    GrResourceProvider* resourceProvider() { return this->context()->fResourceProvider.get(); }
    const GrResourceProvider* resourceProvider() const {
        return this->context()->fResourceProvider.get();
    }

    GrResourceCache* getResourceCache() { return this->context()->fResourceCache.get(); }

    GrGpu* getGpu() { return this->context()->fGpu.get(); }
    const GrGpu* getGpu() const { return this->context()->fGpu.get(); }

    // This accessor should only ever be called by the GrOpFlushState.
    GrAtlasManager* getAtlasManager() {
        return this->context()->onGetAtlasManager();
    }

    // This accessor should only ever be called by the GrOpFlushState.
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    skgpu::ganesh::SmallPathAtlasMgr* getSmallPathAtlasMgr() {
        return this->context()->onGetSmallPathAtlasMgr();
    }
#endif

    void createDDLTask(sk_sp<const GrDeferredDisplayList>,
                       sk_sp<GrRenderTargetProxy> newDest);

    bool compile(const GrProgramDesc&, const GrProgramInfo&);

    GrContextOptions::PersistentCache* getPersistentCache() {
        return this->context()->fPersistentCache;
    }

    GrClientMappedBufferManager* clientMappedBufferManager() {
        return this->context()->fMappedBufferManager.get();
    }

    void setInsideReleaseProc(bool inside) {
        if (inside) {
            this->context()->fInsideReleaseProcCnt++;
        } else {
            SkASSERT(this->context()->fInsideReleaseProcCnt > 0);
            this->context()->fInsideReleaseProcCnt--;
        }
    }
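
    // Illustrative sketch of a hypothetical call site (names are assumptions, not from this
    // header): the calls are expected to be balanced around invoking a client release proc.
    //
    //   dContext->priv().setInsideReleaseProc(true);
    //   releaseProc(releaseCtx);   // hypothetical client callback
    //   dContext->priv().setInsideReleaseProc(false);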

#if defined(GR_TEST_UTILS)
    /** Reset GPU stats */
    void resetGpuStats() const;

    /** Prints cache stats to the string if GR_CACHE_STATS == 1. */
    void dumpCacheStats(SkString*) const;
    void dumpCacheStatsKeyValuePairs(
            skia_private::TArray<SkString>* keys, skia_private::TArray<double>* values) const;
    void printCacheStats() const;

    /** Prints GPU stats to the string if GR_GPU_STATS == 1. */
    void dumpGpuStats(SkString*) const;
    void dumpGpuStatsKeyValuePairs(
            skia_private::TArray<SkString>* keys, skia_private::TArray<double>* values) const;
    void printGpuStats() const;

    /** These are only active if GR_GPU_STATS == 1. */
    void resetContextStats();
    void dumpContextStats(SkString*) const;
    void dumpContextStatsKeyValuePairs(
            skia_private::TArray<SkString>* keys, skia_private::TArray<double>* values) const;
    void printContextStats() const;

    /** Get pointer to atlas texture for given mask format. Note that this wraps an
        actively mutating texture in an SkImage. This could yield unexpected results
        if it gets cached or used more generally. */
    sk_sp<SkImage> testingOnly_getFontAtlasImage(skgpu::MaskFormat format, unsigned int index = 0);

    void testingOnly_flushAndRemoveOnFlushCallbackObject(GrOnFlushCallbackObject*);
#endif

private:
    explicit GrDirectContextPriv(GrDirectContext* dContext) : GrRecordingContextPriv(dContext) {}
    GrDirectContextPriv& operator=(const GrDirectContextPriv&) = delete;

    // No taking addresses of this type.
    const GrDirectContextPriv* operator&() const;
    GrDirectContextPriv* operator&();

    friend class GrDirectContext; // to construct/copy this type.
};

inline GrDirectContextPriv GrDirectContext::priv() { return GrDirectContextPriv(this); }

// NOLINTNEXTLINE(readability-const-return-type)
inline const GrDirectContextPriv GrDirectContext::priv() const {
    return GrDirectContextPriv(const_cast<GrDirectContext*>(this));
}
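
// Illustrative access pattern (sketch; 'dContext' is assumed to be a GrDirectContext* owned by
// the caller). Skia-internal code reaches the privileged API through priv():
//
//   GrResourceCache* cache = dContext->priv().getResourceCache();
//   GrGpu* gpu = dContext->priv().getGpu();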

#endif