Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
BulkRectTest.cpp
Go to the documentation of this file.
1/*
2 * Copyright 2019 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
13#include "include/core/SkRect.h"
16#include "include/core/SkSize.h"
23#include "include/gpu/GrTypes.h"
27#include "src/gpu/Swizzle.h"
48#include "tests/Test.h"
49
50#include <cstdint>
51#include <memory>
52#include <utility>
53
54struct GrContextOptions;
55
56using namespace skgpu::ganesh;
57
// Creates the 128x128 SurfaceDrawContext that every test below renders into.
// NOTE(review): this listing is a Doxygen extraction and several source lines
// were dropped here — the SurfaceDrawContext::Make(...) call head and most of
// its arguments are missing; only the surviving argument lines are shown.
// Confirm against the original tests/BulkRectTest.cpp before editing.
58static std::unique_ptr<skgpu::ganesh::SurfaceDrawContext> new_SDC(GrRecordingContext* rContext) {
61 nullptr,
63 {128, 128},
65 /*label=*/{});
66}
67
// Body of create_proxy(GrRecordingContext*): builds a renderable 128x128
// texture proxy used by the texture-op tests. Protected-ness mirrors the
// context's supportsProtectedContent() capability (visible on line 73).
// NOTE(review): extraction gaps — the function signature line, the
// getDefaultBackendFormat(...) call that initializes 'format' (its tail
// 'GrRenderable::kYes)' survives on line 77), the SkBackingFit argument
// (original line 83), and the trailing createProxy argument (line 87) were
// all dropped by the documentation generator.
69 using namespace skgpu;
70
71 static constexpr SkISize kDimensions = {128, 128};
72
73 Protected isProtected = Protected(rContext->priv().caps()->supportsProtectedContent());
74
77 GrRenderable::kYes);
78 return rContext->priv().proxyProvider()->createProxy(format,
79 kDimensions,
80 GrRenderable::kYes,
81 1,
82 Mipmapped::kNo,
84 Budgeted::kNo,
85 isProtected,
86 /*label=*/"CreateSurfaceProxy",
88}
89
// PerQuadAAFunc: maps a quad index to its per-quad AA flags; each test case
// below supplies one as a lambda to vary AA across the generated quads.
90typedef GrQuadAAFlags (*PerQuadAAFunc)(int i);
91
// NOTE(review): the lines below are the surviving tail of the BulkRectTest
// function-pointer typedef (its head, original lines 92-94/96, was dropped
// by the extractor). The full signature is shown in the page's tooltip
// dump: void (*)(skiatest::Reporter*, GrDirectContext*, PerQuadAAFunc,
// GrAAType, SkBlendMode, bool, bool, int, int).
95 GrAAType overallAA,
97 bool addOneByOne,
98 bool allUniqueProxies,
99 int requestedTotNumQuads,
100 int expectedNumOps);
101
102//-------------------------------------------------------------------------------------------------
// fillrectop_creation_test: one BulkRectTest implementation. Bulk-adds
// 'requestedTotNumQuads' solid-color quads (via FillRectOp batching) and
// asserts the ops task ends up with exactly 'expectedNumOps' op chains, all
// of class FillRectOp, covering every requested quad.
// NOTE(review): extraction gaps — the signature head (original line 103),
// the GrPaint declaration (line 124), the FillRectOp::AddFillRectOps call
// head (line 127), and one REPORTER_ASSERT (line 144) were dropped.
104 PerQuadAAFunc perQuadAA, GrAAType overallAA,
105 SkBlendMode blendMode, bool addOneByOne,
106 bool allUniqueProxies,
107 int requestedTotNumQuads, int expectedNumOps) {
108
// FillRectOp has no one-by-one / unique-proxy variants, so those modes are
// only exercised by the texture test; bail out here.
109 if (addOneByOne || allUniqueProxies) {
110 return;
111 }
112
113 std::unique_ptr<skgpu::ganesh::SurfaceDrawContext> sdc = new_SDC(dContext);
114
115 auto quads = new GrQuadSetEntry[requestedTotNumQuads];
116
// Populate every entry identically except for the per-quad AA flags, which
// come from the caller-supplied functor.
117 for (int i = 0; i < requestedTotNumQuads; ++i) {
118 quads[i].fRect = SkRect::MakeWH(100.5f, 100.5f); // prevent the int non-AA optimization
119 quads[i].fColor = SK_PMColor4fWHITE;
120 quads[i].fLocalMatrix = SkMatrix::I();
121 quads[i].fAAFlags = perQuadAA(i);
122 }
123
125 paint.setXPFactory(GrXPFactory::FromBlendMode(blendMode));
126
// (Elided call head on original line 127: FillRectOp::AddFillRectOps(sdc.get(), ...)
128 nullptr,
129 dContext,
130 std::move(paint),
131 overallAA,
132 SkMatrix::I(),
133 quads,
134 requestedTotNumQuads);
135
// Inspect the recorded ops task: count op chains and total quads.
136 auto opsTask = sdc->testingOnly_PeekLastOpsTask();
137 int actualNumOps = opsTask->numOpChains();
138
139 int actualTotNumQuads = 0;
140
141 for (int i = 0; i < actualNumOps; ++i) {
142 const GrOp* tmp = opsTask->getChain(i);
143 REPORTER_ASSERT(reporter, tmp->classID() == skgpu::ganesh::FillRectOp::ClassID());
145 actualTotNumQuads += ((const GrDrawOp*) tmp)->numQuads();
146 }
147
148 REPORTER_ASSERT(reporter, expectedNumOps == actualNumOps);
149 REPORTER_ASSERT(reporter, requestedTotNumQuads == actualTotNumQuads);
150
151 dContext->flushAndSubmit();
152
153 delete[] quads;
154}
155
156//-------------------------------------------------------------------------------------------------
// textureop_creation_test: the TextureOp counterpart of the fill-rect test.
// Builds 'requestedTotNumQuads' textured quads — either added one op at a
// time (addOneByOne) or in bulk — then verifies the resulting op-chain count
// and class IDs, and that every requested quad was recorded.
// NOTE(review): extraction gaps — the signature head (original line 157),
// the swizzle/origin arguments of the GrSurfaceProxyView constructors
// (lines 170-171, 173-174, 188-189), the TextureOp::AddTextureSetOps call
// head (line 224), the SrcRectConstraint argument (line 235), and the
// second half of the kSrcOver condition (line 246) were dropped.
158 PerQuadAAFunc perQuadAA, GrAAType overallAA,
159 SkBlendMode blendMode, bool addOneByOne,
160 bool allUniqueProxies,
161 int requestedTotNumQuads, int expectedNumOps) {
162 std::unique_ptr<skgpu::ganesh::SurfaceDrawContext> sdc = new_SDC(dContext);
163
164 GrSurfaceProxyView proxyViewA, proxyViewB;
165
166 if (!allUniqueProxies) {
167 sk_sp<GrSurfaceProxy> proxyA = create_proxy(dContext);
168 sk_sp<GrSurfaceProxy> proxyB = create_proxy(dContext);
169 proxyViewA = GrSurfaceProxyView(std::move(proxyA),
172 proxyViewB = GrSurfaceProxyView(std::move(proxyB),
175 }
176
177 auto set = new GrTextureSetEntry[requestedTotNumQuads];
178
179 for (int i = 0; i < requestedTotNumQuads; ++i) {
180 if (!allUniqueProxies) {
181 // Alternate between two proxies to prevent op merging if the batch API was forced to
182 // submit one op at a time (to work, this does require that all fDstRects overlap).
183 set[i].fProxyView = i % 2 == 0 ? proxyViewA : proxyViewB;
184 } else {
185 // Each op gets its own proxy to force chaining only
186 sk_sp<GrSurfaceProxy> proxyA = create_proxy(dContext);
187 set[i].fProxyView = GrSurfaceProxyView(std::move(proxyA),
190 }
191
192 set[i].fSrcAlphaType = kPremul_SkAlphaType;
193 set[i].fSrcRect = SkRect::MakeWH(100.0f, 100.0f);
194 set[i].fDstRect = SkRect::MakeWH(100.5f, 100.5f); // prevent the int non-AA optimization
195 set[i].fDstClipQuad = nullptr;
196 set[i].fPreViewMatrix = nullptr;
197 set[i].fColor = {1.f, 1.f, 1.f, 1.f};
198 set[i].fAAFlags = perQuadAA(i);
199 }
200
// One-by-one path: make an individual TextureOp per entry and add it
// directly; op chaining (not batching) is what gets exercised here.
201 if (addOneByOne) {
202 for (int i = 0; i < requestedTotNumQuads; ++i) {
203 DrawQuad quad;
204
205 quad.fDevice = GrQuad::MakeFromRect(set[i].fDstRect, SkMatrix::I());
206 quad.fLocal = GrQuad(set[i].fSrcRect);
207 quad.fEdgeFlags = set[i].fAAFlags;
208
209 GrOp::Owner op = TextureOp::Make(dContext,
210 set[i].fProxyView,
211 set[i].fSrcAlphaType,
212 nullptr,
213 GrSamplerState::Filter::kNearest,
214 GrSamplerState::MipmapMode::kNone,
215 set[i].fColor,
216 TextureOp::Saturate::kYes,
217 blendMode,
218 overallAA,
219 &quad,
220 nullptr);
221 sdc->addDrawOp(nullptr, std::move(op));
222 }
223 } else {
// Bulk path. (Elided call head on original line 224:
// TextureOp::AddTextureSetOps(sdc.get(), ...)
225 nullptr,
226 dContext,
227 set,
228 requestedTotNumQuads,
229 requestedTotNumQuads, // We alternate so proxyCnt == cnt
230 GrSamplerState::Filter::kNearest,
231 GrSamplerState::MipmapMode::kNone,
232 TextureOp::Saturate::kYes,
233 blendMode,
234 overallAA,
236 SkMatrix::I(),
237 nullptr);
238 }
239
240 auto opsTask = sdc->testingOnly_PeekLastOpsTask();
241 int actualNumOps = opsTask->numOpChains();
242
243 int actualTotNumQuads = 0;
244
// (Elided continuation of this condition on original line 246 — presumably
// the dynamic-state texture-support caps check; confirm against the
// original file.)
245 if (blendMode != SkBlendMode::kSrcOver ||
247 // In either of these two cases, TextureOp creates one op per quad instead. Since
248 // each entry alternates proxies but overlaps geometrically, this will prevent the ops
249 // from being merged back into fewer ops.
250 expectedNumOps = requestedTotNumQuads;
251 }
// Non-src-over blends fall back to FillRectOp; src-over stays TextureOp.
252 uint32_t expectedOpID = blendMode == SkBlendMode::kSrcOver
253 ? TextureOp::ClassID()
254 : skgpu::ganesh::FillRectOp::ClassID();
// Walk every chain; unless ops were deliberately chained (allUniqueProxies),
// each chain should be a single op (its own tail).
255 for (int i = 0; i < actualNumOps; ++i) {
256 const GrOp* tmp = opsTask->getChain(i);
257 REPORTER_ASSERT(reporter, allUniqueProxies || tmp->isChainTail());
258 while (tmp) {
259 REPORTER_ASSERT(reporter, tmp->classID() == expectedOpID);
260 actualTotNumQuads += ((const GrDrawOp*) tmp)->numQuads();
261 tmp = tmp->nextInChain();
262 }
263 }
264
265 REPORTER_ASSERT(reporter, expectedNumOps == actualNumOps);
266 REPORTER_ASSERT(reporter, requestedTotNumQuads == actualTotNumQuads);
267
268 dContext->flushAndSubmit();
269
270 delete[] set;
271}
272
273//-------------------------------------------------------------------------------------------------
// Body of run_test(GrDirectContext*, skiatest::Reporter*, BulkRectTest test):
// drives the supplied BulkRectTest implementation (fill-rect or texture
// variant) through a series of AA/batching scenarios.
// NOTE(review): extraction gaps — the signature head (original line 274),
// the return statements of several lambdas (lines 279, 292, 320-321), and
// the head of each test(...) invocation (lines 284, 297, 309, 311, 326,
// 341, 355, 368) were dropped; only the trailing argument lines survive.
275
276 // This is the simple case where there is no AA at all. We expect 2 non-AA clumps of quads.
277 {
278 auto noAA = [](int i) -> GrQuadAAFlags {
280 };
281
282 static const int kNumExpectedOps = 2;
283
285 false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), kNumExpectedOps);
286 }
287
288 // This is the same as the above case except the overall AA is kCoverage. However, since
289 // the per-quad AA is still none, all the quads should be downgraded to non-AA.
290 {
291 auto noAA = [](int i) -> GrQuadAAFlags {
293 };
294
295 static const int kNumExpectedOps = 2;
296
298 false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), kNumExpectedOps);
299 }
300
301 // This case has an overall AA of kCoverage but the per-quad AA alternates.
302 // We should end up with several aa-sized clumps
303 {
304 auto alternateAA = [](int i) -> GrQuadAAFlags {
305 return (i % 2) ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
306 };
307
308 int numExpectedOps = 2*GrResourceProvider::MaxNumNonAAQuads() /
310
312 false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), numExpectedOps);
313 }
314
315 // In this case we have a run of MaxNumAAQuads non-AA quads and then AA quads. This
316 // exercises the case where we have a clump of quads that can't be upgraded to AA because
317 // of its size. We expect one clump of non-AA quads followed by one clump of AA quads.
318 {
319 auto runOfNonAA = [](int i) -> GrQuadAAFlags {
322 };
323
324 static const int kNumExpectedOps = 2;
325
327 false, false, 2*GrResourceProvider::MaxNumAAQuads(), kNumExpectedOps);
328 }
329
330 // In this case we use a blend mode other than src-over, which hits the FillRectOp fallback
331 // code path for TextureOp. We pass in the expected results if batching was successful, so
332 // that bulk_fill_rect_create_test batches on all modes; bulk_texture_rect_create_test is
333 // responsible for revising its expectations.
334 {
335 auto fixedAA = [](int i) -> GrQuadAAFlags {
336 return GrQuadAAFlags::kAll;
337 };
338
339 static const int kNumExpectedOps = 2;
340
342 false, false, 2*GrResourceProvider::MaxNumAAQuads(), kNumExpectedOps);
343 }
344
345 // This repros crbug.com/1108475, where we create 1024 non-AA texture ops w/ one coverage-AA
346 // texture op in the middle. Because each op has its own texture, all the texture ops
347 // get chained together so the quad count can exceed the AA maximum.
348 {
349 auto onlyOneAA = [](int i) -> GrQuadAAFlags {
350 return i == 256 ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
351 };
352
353 static const int kNumExpectedOps = 3;
354
356 true, true, 1024, kNumExpectedOps);
357 }
358
359 // This repros a problem related to crbug.com/1108475. In this case, the bulk creation
360 // method had no way to break up the set of texture ops at the AA quad limit.
361 {
362 auto onlyOneAA = [](int i) -> GrQuadAAFlags {
363 return i == 256 ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
364 };
365
366 static const int kNumExpectedOps = 2;
367
369 false, true, 1024, kNumExpectedOps);
370 }
371
372}
373
// Test registrations. NOTE(review): the extractor dropped the
// DEF_GANESH_TEST_FOR_RENDERING_CONTEXTS(BulkFillRectTest, ...) and
// (...BulkTextureRectTest, ...) macro heads (original lines 374/377 and
// 381/384); only the trailing macro arguments and bodies survive. Each body
// simply forwards to run_test with the matching BulkRectTest function.
375 reporter,
376 ctxInfo,
378 run_test(ctxInfo.directContext(), reporter, fillrectop_creation_test);
379}
380
382 reporter,
383 ctxInfo,
385 run_test(ctxInfo.directContext(), reporter, textureop_creation_test);
386}
GrQuadAAFlags(* PerQuadAAFunc)(int i)
static void run_test(GrDirectContext *dContext, skiatest::Reporter *reporter, BulkRectTest test)
void(* BulkRectTest)(skiatest::Reporter *, GrDirectContext *, PerQuadAAFunc, GrAAType overallAA, SkBlendMode, bool addOneByOne, bool allUniqueProxies, int requestedTotNumQuads, int expectedNumOps)
static void fillrectop_creation_test(skiatest::Reporter *reporter, GrDirectContext *dContext, PerQuadAAFunc perQuadAA, GrAAType overallAA, SkBlendMode blendMode, bool addOneByOne, bool allUniqueProxies, int requestedTotNumQuads, int expectedNumOps)
static sk_sp< GrSurfaceProxy > create_proxy(GrRecordingContext *rContext)
static std::unique_ptr< skgpu::ganesh::SurfaceDrawContext > new_SDC(GrRecordingContext *rContext)
static void textureop_creation_test(skiatest::Reporter *reporter, GrDirectContext *dContext, PerQuadAAFunc perQuadAA, GrAAType overallAA, SkBlendMode blendMode, bool addOneByOne, bool allUniqueProxies, int requestedTotNumQuads, int expectedNumOps)
#define test(name)
reporter
GrQuadAAFlags
GrAAType
@ kTopLeft_GrSurfaceOrigin
Definition GrTypes.h:148
skgpu::Protected Protected
@ kPremul_SkAlphaType
pixel components are premultiplied by alpha
Definition SkAlphaType.h:29
SkBlendMode
Definition SkBlendMode.h:38
@ kSrcOver
r = s + (1-sa)*d
@ kSrcATop
r = s*da + d*(1-sa)
constexpr SkPMColor4f SK_PMColor4fWHITE
#define REPORTER_ASSERT(r, cond,...)
Definition Test.h:286
#define DEF_GANESH_TEST_FOR_RENDERING_CONTEXTS(name, reporter, context_info, ctsEnforcement)
Definition Test.h:434
const GrCaps * caps() const
bool supportsProtectedContent() const
Definition GrCaps.h:422
GrBackendFormat getDefaultBackendFormat(GrColorType, GrRenderable) const
Definition GrCaps.cpp:400
bool dynamicStateArrayGeometryProcessorTextureSupport() const
Definition GrCaps.h:418
void flushAndSubmit(GrSyncCpu sync=GrSyncCpu::kNo)
GrDirectContextPriv priv()
Definition GrOp.h:70
std::unique_ptr< GrOp > Owner
Definition GrOp.h:72
GrOp * nextInChain() const
Definition GrOp.h:244
bool isChainTail() const
Definition GrOp.h:242
uint32_t classID() const
Definition GrOp.h:158
sk_sp< GrTextureProxy > createProxy(const GrBackendFormat &, SkISize dimensions, GrRenderable, int renderTargetSampleCnt, skgpu::Mipmapped, SkBackingFit, skgpu::Budgeted, GrProtected, std::string_view label, GrInternalSurfaceFlags=GrInternalSurfaceFlags::kNone, UseAllocator useAllocator=UseAllocator::kYes)
static GrQuad MakeFromRect(const SkRect &, const SkMatrix &)
Definition GrQuad.cpp:107
GrProxyProvider * proxyProvider()
GrRecordingContextPriv priv()
static const GrXPFactory * FromBlendMode(SkBlendMode)
@ kStrict_SrcRectConstraint
sample only inside bounds; slower
Definition SkCanvas.h:1542
static const SkMatrix & I()
static constexpr Swizzle RGBA()
Definition Swizzle.h:66
static void AddFillRectOps(SurfaceDrawContext *, const GrClip *, GrRecordingContext *, GrPaint &&, GrAAType, const SkMatrix &viewMatrix, const GrQuadSetEntry quads[], int quadCount, const GrUserStencilSettings *=nullptr)
static std::unique_ptr< SurfaceDrawContext > Make(GrRecordingContext *, GrColorType, sk_sp< GrSurfaceProxy >, sk_sp< SkColorSpace >, GrSurfaceOrigin, const SkSurfaceProps &)
static void AddTextureSetOps(skgpu::ganesh::SurfaceDrawContext *, const GrClip *, GrRecordingContext *, GrTextureSetEntry[], int cnt, int proxyRunCnt, GrSamplerState::Filter, GrSamplerState::MipmapMode, Saturate, SkBlendMode, GrAAType, SkCanvas::SrcRectConstraint, const SkMatrix &viewMatrix, sk_sp< GrColorSpaceXform > textureXform)
static GrOp::Owner Make(GrRecordingContext *, GrSurfaceProxyView, SkAlphaType srcAlphaType, sk_sp< GrColorSpaceXform >, GrSamplerState::Filter, GrSamplerState::MipmapMode, const SkPMColor4f &, Saturate, SkBlendMode, GrAAType, DrawQuad *, const SkRect *subset=nullptr)
const Paint & paint
uint32_t uint32_t * format
Protected
Definition GpuTypes.h:61
GrQuad fLocal
Definition GrQuad.h:186
GrQuad fDevice
Definition GrQuad.h:185
GrQuadAAFlags fEdgeFlags
Definition GrQuad.h:187
static constexpr SkRect MakeWH(float w, float h)
Definition SkRect.h:609