GrOpFlushState.h
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#ifndef GrOpFlushState_DEFINED
#define GrOpFlushState_DEFINED

#include "src/gpu/AtlasTypes.h"

#include <cstddef>
#include <cstdint>
#include <utility>

class GrAtlasManager;
class GrCaps;
class GrGpu;
class GrOp;
class GrSurfaceProxy;
enum class GrXferBarrierFlags;
struct GrSimpleMesh;
struct SkIRect;
struct SkRect;

namespace skgpu::ganesh {
class SmallPathAtlasMgr;
}
namespace sktext::gpu {
class StrikeCache;
}

/** Tracks the state across all the GrOps (really just the GrDrawOps) in an OpsTask flush. */
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawTarget {
public:
    // vertexSpace and indexSpace may either be null or an allocation of size
    // GrBufferAllocPool::kDefaultBufferSize. If the latter, then CPU memory is only allocated for
    // vertices/indices when a buffer larger than kDefaultBufferSize is required.
    GrOpFlushState(GrGpu*, GrResourceProvider*, skgpu::TokenTracker*,
                   sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);

    ~GrOpFlushState() final { this->reset(); }

    /** This is called after each op has a chance to prepare its draws and before the draws are
        executed. */
    void preExecuteDraws();

    /** Called to upload data to a texture using the GrDeferredTextureUploadFn. If the uploaded
        surface needs to be prepared for being sampled in a draw after the upload, the caller
        should pass in true for shouldPrepareSurfaceForSampling. This feature is needed for Vulkan
        when doing inline uploads to reset the image layout back to sampled. */
    void doUpload(GrDeferredTextureUploadFn&, bool shouldPrepareSurfaceForSampling = false);

    /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
    void executeDrawsAndUploadsForMeshDrawOp(const GrOp* op, const SkRect& chainBounds,
                                             const GrPipeline*, const GrUserStencilSettings*);

    GrOpsRenderPass* opsRenderPass() { return fOpsRenderPass; }
    void setOpsRenderPass(GrOpsRenderPass* renderPass) { fOpsRenderPass = renderPass; }

    GrGpu* gpu() { return fGpu; }

    void reset();

    /** Additional data required on a per-op basis when executing GrOps. */
    struct OpArgs {
        // TODO: why does OpArgs have the op we're going to pass it to as a member? Remove it.
        explicit OpArgs(GrOp* op, const GrSurfaceProxyView& surfaceView, bool usesMSAASurface,
                        GrAppliedClip* appliedClip, const GrDstProxyView& dstProxyView,
                        GrXferBarrierFlags renderPassXferBarriers, GrLoadOp colorLoadOp)
                : fOp(op)
                , fSurfaceView(surfaceView)
                , fRenderTargetProxy(surfaceView.asRenderTargetProxy())
                , fUsesMSAASurface(usesMSAASurface)
                , fAppliedClip(appliedClip)
                , fDstProxyView(dstProxyView)
                , fRenderPassXferBarriers(renderPassXferBarriers)
                , fColorLoadOp(colorLoadOp) {
            SkASSERT(surfaceView.asRenderTargetProxy());
        }

        GrOp* op() { return fOp; }
        const GrSurfaceProxyView& writeView() const { return fSurfaceView; }
        GrRenderTargetProxy* rtProxy() const { return fRenderTargetProxy; }
        // True if the op under consideration belongs to an opsTask that renders to an MSAA buffer.
        bool usesMSAASurface() const { return fUsesMSAASurface; }
        GrAppliedClip* appliedClip() { return fAppliedClip; }
        const GrAppliedClip* appliedClip() const { return fAppliedClip; }
        const GrDstProxyView& dstProxyView() const { return fDstProxyView; }
        GrXferBarrierFlags renderPassBarriers() const { return fRenderPassXferBarriers; }
        GrLoadOp colorLoadOp() const { return fColorLoadOp; }

#ifdef SK_DEBUG
        void validate() const {
            SkASSERT(fOp);
            SkASSERT(fSurfaceView);
        }
#endif

    private:
        GrOp* fOp;
        const GrSurfaceProxyView& fSurfaceView;
        GrRenderTargetProxy* fRenderTargetProxy;
        bool fUsesMSAASurface;
        GrAppliedClip* fAppliedClip;
        GrDstProxyView fDstProxyView;  // TODO: do we still need the dst proxy here?
        GrXferBarrierFlags fRenderPassXferBarriers;
        GrLoadOp fColorLoadOp;
    };

    void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }

    const OpArgs& drawOpArgs() const {
        SkASSERT(fOpArgs);
        SkDEBUGCODE(fOpArgs->validate());
        return *fOpArgs;
    }

    void setSampledProxyArray(skia_private::TArray<GrSurfaceProxy*, true>* sampledProxies) {
        fSampledProxies = sampledProxies;
    }

    skia_private::TArray<GrSurfaceProxy*, true>* sampledProxyArray() override {
        return fSampledProxies;
    }

    /** Overrides of GrDeferredUploadTarget. */

    const skgpu::TokenTracker* tokenTracker() final { return fTokenTracker; }
    skgpu::AtlasToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
    skgpu::AtlasToken addASAPUpload(GrDeferredTextureUploadFn&&) final;

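    // For example, an op that needs to rewrite a texture between two of its draws could record an
    // inline upload while it prepares. This is only an illustrative sketch: 'flushState' is this
    // object, and 'fAtlasData' (plus how the returned token is later used) is an assumption about
    // such an op, not part of this class.
    //
    //     skgpu::AtlasToken token = flushState->addInlineUpload(
    //             [data = fAtlasData](GrDeferredTextureUploadWritePixelsFn& writePixels) {
    //                 // write 'data' to the destination texture via 'writePixels'
    //             });
    //
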
    /** Overrides of GrMeshDrawTarget. */
    void recordDraw(const GrGeometryProcessor*,
                    const GrSimpleMesh[],
                    int meshCnt,
                    const GrSurfaceProxy* const primProcProxies[],
                    GrPrimitiveType) final;
    void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
                          int* startVertex) final;
    uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
    void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
                                 sk_sp<const GrBuffer>*, int* startVertex,
                                 int* actualVertexCount) final;
    uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                    sk_sp<const GrBuffer>*, int* startIndex,
                                    int* actualIndexCount) final;
    GrDrawIndirectWriter makeDrawIndirectSpace(int drawCount, sk_sp<const GrBuffer>* buffer,
                                               size_t* offset) override {
        return fDrawIndirectPool.makeSpace(drawCount, buffer, offset);
    }
    GrDrawIndexedIndirectWriter makeDrawIndexedIndirectSpace(int drawCount,
                                                             sk_sp<const GrBuffer>* buffer,
                                                             size_t* offset) override {
        return fDrawIndirectPool.makeIndexedSpace(drawCount, buffer, offset);
    }
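    // The make*Space methods above hand out vertex/index/indirect-draw storage from the flush
    // state's internal buffer pools. As a sketch only (where 'target' is this flush state seen as
    // a GrMeshDrawTarget, and 'kVertexStride', 'fVertexBuffer', and 'fBaseVertex' are assumed
    // members of a hypothetical op), staging four vertices during prepare might look like:
    //
    //     void* verts = target->makeVertexSpace(kVertexStride, 4, &fVertexBuffer, &fBaseVertex);
    //     if (!verts) {
    //         return;  // allocation failed; skip recording this draw
    //     }
    //     // ... write 4 vertices into 'verts' ...
    //
    // Unused space can be returned with the putBack*() methods that follow.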
    void putBackIndices(int indexCount) final;
    void putBackVertices(int vertices, size_t vertexStride) final;
    void putBackIndirectDraws(int drawCount) final { fDrawIndirectPool.putBack(drawCount); }
    void putBackIndexedIndirectDraws(int drawCount) final {
        fDrawIndirectPool.putBackIndexed(drawCount);
    }
    const GrSurfaceProxyView& writeView() const final { return this->drawOpArgs().writeView(); }
    GrRenderTargetProxy* rtProxy() const final { return this->drawOpArgs().rtProxy(); }
    bool usesMSAASurface() const final { return this->drawOpArgs().usesMSAASurface(); }
    const GrAppliedClip* appliedClip() const final { return this->drawOpArgs().appliedClip(); }
    const GrAppliedHardClip& appliedHardClip() const {
        return (fOpArgs->appliedClip()) ?
                fOpArgs->appliedClip()->hardClip() : GrAppliedHardClip::Disabled();
    }
    GrAppliedClip detachAppliedClip() final;
    const GrDstProxyView& dstProxyView() const final {
        return this->drawOpArgs().dstProxyView();
    }

    GrXferBarrierFlags renderPassBarriers() const final {
        return this->drawOpArgs().renderPassBarriers();
    }

    GrLoadOp colorLoadOp() const final {
        return this->drawOpArgs().colorLoadOp();
    }

    GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
    const GrCaps& caps() const final;
    GrThreadSafeCache* threadSafeCache() const final;
    GrResourceProvider* resourceProvider() const final { return fResourceProvider; }

    sktext::gpu::StrikeCache* strikeCache() const final;

    // At this point we know we're flushing so full access to the GrAtlasManager and
    // SmallPathAtlasMgr is required (and permissible).
    GrAtlasManager* atlasManager() const final;
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    skgpu::ganesh::SmallPathAtlasMgr* smallPathAtlasManager() const final;
#endif

    /** GrMeshDrawTarget override. */
    SkArenaAlloc* allocator() override { return &fArena; }

    // This is a convenience method that binds the given pipeline, and then, if our applied clip
    // has a scissor, sets the scissor rect from the applied clip.
    void bindPipelineAndScissorClip(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        SkASSERT((programInfo.pipeline().isScissorTestEnabled()) ==
                 (this->appliedClip() && this->appliedClip()->scissorState().enabled()));
        this->bindPipeline(programInfo, drawBounds);
        if (programInfo.pipeline().isScissorTestEnabled()) {
            this->setScissorRect(this->appliedClip()->scissorState().rect());
        }
    }

    // This is a convenience method for when the primitive processor has exactly one texture. It
    // binds one texture for the primitive processor, and any others for FPs on the pipeline.
    void bindTextures(const GrGeometryProcessor& geomProc,
                      const GrSurfaceProxy& singleGeomProcTexture,
                      const GrPipeline& pipeline) {
        SkASSERT(geomProc.numTextureSamplers() == 1);
        const GrSurfaceProxy* ptr = &singleGeomProcTexture;
        this->bindTextures(geomProc, &ptr, pipeline);
    }

    // Makes the appropriate bindBuffers() and draw*() calls for the provided mesh.
    void drawMesh(const GrSimpleMesh& mesh);

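    // Taken together, a mesh-draw op's execute phase typically reduces to a short sequence of the
    // helpers above. Sketch only: 'fProgramInfo', 'fGeomProc', 'fAtlasProxy', and 'fMesh' are
    // assumed members of a hypothetical op, and 'chainBounds' is the bounds passed to its execute
    // call; none of these are part of this class.
    //
    //     flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
    //     flushState->bindTextures(*fGeomProc, *fAtlasProxy, fProgramInfo->pipeline());
    //     flushState->drawMesh(*fMesh);
    //
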
    // Pass-through methods to GrOpsRenderPass.
    void bindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        fOpsRenderPass->bindPipeline(programInfo, drawBounds);
    }
    void setScissorRect(const SkIRect& scissorRect) {
        fOpsRenderPass->setScissorRect(scissorRect);
    }
    void bindTextures(const GrGeometryProcessor& geomProc,
                      const GrSurfaceProxy* const geomProcTextures[],
                      const GrPipeline& pipeline) {
        fOpsRenderPass->bindTextures(geomProc, geomProcTextures, pipeline);
    }
    void bindBuffers(sk_sp<const GrBuffer> indexBuffer, sk_sp<const GrBuffer> instanceBuffer,
                     sk_sp<const GrBuffer> vertexBuffer,
                     GrPrimitiveRestart primitiveRestart = GrPrimitiveRestart::kNo) {
        fOpsRenderPass->bindBuffers(std::move(indexBuffer), std::move(instanceBuffer),
                                    std::move(vertexBuffer), primitiveRestart);
    }
    void draw(int vertexCount, int baseVertex) {
        fOpsRenderPass->draw(vertexCount, baseVertex);
    }
    void drawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue, uint16_t maxIndexValue,
                     int baseVertex) {
        fOpsRenderPass->drawIndexed(indexCount, baseIndex, minIndexValue, maxIndexValue,
                                    baseVertex);
    }
    void drawInstanced(int instanceCount, int baseInstance, int vertexCount, int baseVertex) {
        fOpsRenderPass->drawInstanced(instanceCount, baseInstance, vertexCount, baseVertex);
    }
    void drawIndexedInstanced(int indexCount, int baseIndex, int instanceCount, int baseInstance,
                              int baseVertex) {
        fOpsRenderPass->drawIndexedInstanced(indexCount, baseIndex, instanceCount, baseInstance,
                                             baseVertex);
    }
    void drawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndexedIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexPattern(int patternIndexCount, int patternRepeatCount,
                          int maxPatternRepetitionsInIndexBuffer, int patternVertexCount,
                          int baseVertex) {
        fOpsRenderPass->drawIndexPattern(patternIndexCount, patternRepeatCount,
                                         maxPatternRepetitionsInIndexBuffer, patternVertexCount,
                                         baseVertex);
    }

private:
    struct InlineUpload {
        InlineUpload(GrDeferredTextureUploadFn&& upload, skgpu::AtlasToken token)
                : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
        GrDeferredTextureUploadFn fUpload;
        skgpu::AtlasToken fUploadBeforeToken;
    };

    // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
    // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
    // that share a geometry processor into a Draw is that it allows the Gpu object to set up
    // the shared state once and then issue draws for each mesh.
    struct Draw {
        ~Draw();
        // The geometry processor is always forced to be in an arena allocation. This object does
        // not need to manage its lifetime.
        const GrGeometryProcessor* fGeometryProcessor = nullptr;
        // Must have GrGeometryProcessor::numTextureSamplers() entries. Can be null if no samplers.
        const GrSurfaceProxy* const* fGeomProcProxies = nullptr;
        const GrSimpleMesh* fMeshes = nullptr;
        const GrOp* fOp = nullptr;
        int fMeshCnt = 0;
        GrPrimitiveType fPrimitiveType;
    };

    // Storage for ops' pipelines, draws, and inline uploads.
    SkArenaAllocWithReset fArena{sizeof(GrPipeline) * 100};

    // Store vertex and index data on behalf of ops that are flushed.
    GrVertexBufferAllocPool fVertexPool;
    GrIndexBufferAllocPool fIndexPool;
    GrDrawIndirectBufferAllocPool fDrawIndirectPool;

    // Data stored on behalf of the ops being flushed.
    SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
    SkArenaAllocList<InlineUpload> fInlineUploads;
    SkArenaAllocList<Draw> fDraws;

    // All draws we store have an implicit draw token. This is the draw token for the first draw
    // in fDraws.
    skgpu::AtlasToken fBaseDrawToken = skgpu::AtlasToken::InvalidToken();

    // Info about the op that is currently preparing or executing using the flush state, or null
    // if an op is not currently preparing or executing.
    OpArgs* fOpArgs = nullptr;

    // This field is only transiently set during flush. Each OpsTask will set it to point to an
    // array of proxies it uses before calling onPrepare and onExecute.
    skia_private::TArray<GrSurfaceProxy*, true>* fSampledProxies;

    GrGpu* fGpu;
    GrResourceProvider* fResourceProvider;
    skgpu::TokenTracker* fTokenTracker;
    GrOpsRenderPass* fOpsRenderPass = nullptr;

    // Variables that are used to track where we are in lists as ops are executed.
    SkArenaAllocList<Draw>::Iter fCurrDraw;
    SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
};

#endif