FuzzDDLThreading.cpp
/*
 * Copyright 2021 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "fuzz/Fuzz.h"
#include "fuzz/FuzzCommon.h"

#include "include/core/SkCanvas.h"
#include "include/core/SkColor.h"
#include "include/core/SkColorSpace.h"
#include "include/core/SkExecutor.h"
#include "include/core/SkImage.h"
#include "include/core/SkImageInfo.h"
#include "include/core/SkSize.h"
#include "include/core/SkSurface.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/ganesh/SkSurfaceGanesh.h"
#include "include/private/chromium/GrDeferredDisplayList.h"
#include "include/private/chromium/GrDeferredDisplayListRecorder.h"
#include "include/private/chromium/GrPromiseImageTexture.h"
#include "include/private/chromium/GrSurfaceCharacterization.h"
#include "include/private/chromium/SkImageChromium.h"
#include "src/core/SkTaskGroup.h"
#include "tools/gpu/GrContextFactory.h"

#include <atomic>
#include <memory>
#include <queue>

using namespace skia_private;
using ContextType = skgpu::ContextType;

// be careful: `foo(make_fuzz_t<T>(f), make_fuzz_t<U>(f))` is undefined.
// In fact, all make_fuzz_foo() functions have this potential problem.
// Use sequence points!
template <typename T>
inline T make_fuzz_t(Fuzz* fuzz) {
    T t;
    fuzz->next(&t);
    return t;
}
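// For example (illustrative only; `takeTwo` is a hypothetical consumer): read each value into
// a named local so the order in which fuzz bytes are consumed is deterministic, rather than
// depending on the compiler's argument evaluation order:
//
//     bool flag  = make_fuzz_t<bool>(fuzz);
//     int  count = make_fuzz_t<int>(fuzz);
//     takeTwo(flag, count);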

class DDLFuzzer;

// This class stores the state of a given promise image owned by the fuzzer. It acts as the
// context for the callback procs of the promise image.
class PromiseImageInfo : public SkNVRefCnt<PromiseImageInfo> {
public:
    enum class State : int {
        kInitial,
        kTriedToFulfill,
        kDone
    };

    PromiseImageInfo() = default;
    ~PromiseImageInfo() {
        // If we hit this, then the image or the texture will outlive this object which is bad.
        SkASSERT_RELEASE(!fImage || fImage->unique());
        SkASSERT_RELEASE(!fTexture || fTexture->unique());
        fImage.reset();
        fTexture.reset();
        State s = fState;
        SkASSERT_RELEASE(!fDrawn || s == State::kDone);
    }

    // Make noncopyable
    PromiseImageInfo(PromiseImageInfo&) = delete;
    PromiseImageInfo& operator=(PromiseImageInfo&) = delete;

    DDLFuzzer* fFuzzer = nullptr;
    sk_sp<SkImage> fImage;
    // At the moment, the atomicity of this isn't used because all our promise image callbacks
    // happen on the same thread. See the TODO below about unreffing them off the GPU thread.
    std::atomic<State> fState{State::kInitial};
    std::atomic<bool> fDrawn{false};

    sk_sp<GrPromiseImageTexture> fTexture;
};
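// State transitions enforced by the callbacks below: kInitial -> kTriedToFulfill (fulfill)
// -> kDone (release). A promise image that was drawn but released without ever reaching
// kTriedToFulfill is reported via Fuzz::signalBug().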

static constexpr int kPromiseImageCount = 8;
static constexpr SkISize kPromiseImageSize{16, 16};
static constexpr int kPromiseImagesPerDDL = 4;
static constexpr int kRecordingThreadCount = 4;
static constexpr int kIterationCount = 10000;

// A one-shot runner object for fuzzing our DDL threading. It creates an array of promise images,
// and concurrently records DDLs that reference them, playing each DDL back on the GPU thread.
// The backing textures for promise images may be recycled into a pool, or not, for each case
// as determined by the fuzzing data.
class DDLFuzzer {
public:
    DDLFuzzer(Fuzz*, ContextType);
    DDLFuzzer() = delete;
    // Make noncopyable
    DDLFuzzer(DDLFuzzer&) = delete;
    DDLFuzzer& operator=(DDLFuzzer&) = delete;

    void run();

    sk_sp<GrPromiseImageTexture> fulfillPromiseImage(PromiseImageInfo&);
    void releasePromiseImage(PromiseImageInfo&);
private:
    void initPromiseImage(int index);
    void recordAndPlayDDL();
    bool isOnGPUThread() const { return SkGetThreadID() == fGpuThread; }
    bool isOnMainThread() const { return SkGetThreadID() == fMainThread; }

    Fuzz* fFuzz = nullptr;
    GrDirectContext* fContext = nullptr;
    AutoTArray<PromiseImageInfo> fPromiseImages{kPromiseImageCount};
    sk_sp<SkSurface> fSurface;
    GrSurfaceCharacterization fSurfaceCharacterization;
    std::unique_ptr<SkExecutor> fGpuExecutor = SkExecutor::MakeFIFOThreadPool(1, false);
    std::unique_ptr<SkExecutor> fRecordingExecutor =
            SkExecutor::MakeFIFOThreadPool(kRecordingThreadCount, false);
    SkTaskGroup fGpuTaskGroup{*fGpuExecutor};
    SkTaskGroup fRecordingTaskGroup{*fRecordingExecutor};
    SkThreadID fGpuThread = kIllegalThreadID;
    SkThreadID fMainThread = SkGetThreadID();
    std::queue<sk_sp<GrPromiseImageTexture>> fReusableTextures;
    sk_gpu_test::GrContextFactory fContextFactory;
};
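// Thread layout implied by the members above: a single FIFO "GPU" executor thread replays DDLs
// and services the promise-image callbacks, kRecordingThreadCount recording threads record DDLs
// concurrently, and the main thread orchestrates setup, the iteration batch, and teardown.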

DDLFuzzer::DDLFuzzer(Fuzz* fuzz, ContextType contextType) : fFuzz(fuzz) {
    sk_gpu_test::ContextInfo ctxInfo = fContextFactory.getContextInfo(contextType);
    sk_gpu_test::TestContext* testCtx = ctxInfo.testContext();
    fContext = ctxInfo.directContext();
    if (!fContext) {
        return;
    }
    SkISize canvasSize = kPromiseImageSize;
    canvasSize.fWidth *= kPromiseImagesPerDDL;
    SkImageInfo ii = SkImageInfo::Make(canvasSize.width(), canvasSize.height(),
                                       kRGBA_8888_SkColorType, kPremul_SkAlphaType);
    fSurface = SkSurfaces::RenderTarget(fContext, skgpu::Budgeted::kNo, ii);
    if (!fSurface || !fSurface->characterize(&fSurfaceCharacterization)) {
        return;
    }

    testCtx->makeNotCurrent();
    fGpuTaskGroup.add([&]{
        testCtx->makeCurrent();
        fGpuThread = SkGetThreadID();
    });
    fGpuTaskGroup.wait();
    for (int i = 0; i < kPromiseImageCount; ++i) {
        this->initPromiseImage(i);
    }
}
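// In the constructor above, the makeNotCurrent()/makeCurrent() pair hands the test context off
// to whichever thread the GPU executor runs its first task on; a GL context can only be current
// on one thread at a time, so all subsequent GPU work must be queued to that same thread.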

sk_sp<GrPromiseImageTexture> DDLFuzzer::fulfillPromiseImage(PromiseImageInfo& promiseImage) {
    using State = PromiseImageInfo::State;
    if (!this->isOnGPUThread()) {
        fFuzz->signalBug();
    }
    bool success = make_fuzz_t<bool>(fFuzz);
    State prior = promiseImage.fState.exchange(State::kTriedToFulfill, std::memory_order_relaxed);
    if (prior != State::kInitial || promiseImage.fTexture != nullptr) {
        fFuzz->signalBug();
    }
    if (!success) {
        return nullptr;
    }

    // Try reusing an existing texture if we can and if the fuzzer wills it.
    if (!fReusableTextures.empty() && make_fuzz_t<bool>(fFuzz)) {
        promiseImage.fTexture = std::move(fReusableTextures.front());
        fReusableTextures.pop();
        return promiseImage.fTexture;
    }

    bool finishedBECreate = false;
    auto markFinished = [](void* context) {
        *(bool*)context = true;
    };

    GrBackendTexture backendTex =
            fContext->createBackendTexture(kPromiseImageSize.width(),
                                           kPromiseImageSize.height(),
                                           kRGBA_8888_SkColorType,
                                           SkColors::kRed,
                                           skgpu::Mipmapped::kNo,
                                           GrRenderable::kYes,
                                           GrProtected::kNo,
                                           markFinished,
                                           &finishedBECreate,
                                           /*label=*/"DDLFuzzer_FulFillPromiseImage");
    SkASSERT_RELEASE(backendTex.isValid());
    while (!finishedBECreate) {
        fContext->checkAsyncWorkCompletion();
    }

    promiseImage.fTexture = GrPromiseImageTexture::Make(backendTex);

    return promiseImage.fTexture;
}
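// Note on the fulfillment path above: backend texture creation may complete asynchronously, so
// the code polls checkAsyncWorkCompletion() until markFinished flips the flag before wrapping
// the texture in a GrPromiseImageTexture and returning it.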

void DDLFuzzer::releasePromiseImage(PromiseImageInfo& promiseImage) {
    using State = PromiseImageInfo::State;
    // TODO: This requirement will go away when we unref promise images off the GPU thread.
    if (!this->isOnGPUThread()) {
        fFuzz->signalBug();
    }

    State old = promiseImage.fState.exchange(State::kDone, std::memory_order_relaxed);
    if (promiseImage.fDrawn && old != State::kTriedToFulfill) {
        fFuzz->signalBug();
    }

    // If we failed to fulfill, then nothing to be done.
    if (!promiseImage.fTexture) {
        return;
    }

    bool reuse = make_fuzz_t<bool>(fFuzz);
    if (reuse) {
        fReusableTextures.push(std::move(promiseImage.fTexture));
    } else {
        fContext->deleteBackendTexture(promiseImage.fTexture->backendTexture());
    }
    promiseImage.fTexture = nullptr;
}

static sk_sp<GrPromiseImageTexture> fuzz_promise_image_fulfill(void* ctxIn) {
    PromiseImageInfo& fuzzPromiseImage = *(PromiseImageInfo*)ctxIn;
    return fuzzPromiseImage.fFuzzer->fulfillPromiseImage(fuzzPromiseImage);
}

static void fuzz_promise_image_release(void* ctxIn) {
    PromiseImageInfo& fuzzPromiseImage = *(PromiseImageInfo*)ctxIn;
    fuzzPromiseImage.fFuzzer->releasePromiseImage(fuzzPromiseImage);
}
237void DDLFuzzer::initPromiseImage(int index) {
238 PromiseImageInfo& promiseImage = fPromiseImages[index];
239 promiseImage.fFuzzer = this;
241 GrRenderable::kYes);
242 promiseImage.fImage = SkImages::PromiseTextureFrom(fContext->threadSafeProxy(),
243 backendFmt,
245 skgpu::Mipmapped::kNo,
252 &promiseImage);
253}
254
void DDLFuzzer::recordAndPlayDDL() {
    SkASSERT(!this->isOnGPUThread() && !this->isOnMainThread());
    GrDeferredDisplayListRecorder recorder(fSurfaceCharacterization);
    SkCanvas* canvas = recorder.getCanvas();
    // Draw promise images in a strip
    for (int i = 0; i < kPromiseImagesPerDDL; i++) {
        int xOffset = i * kPromiseImageSize.width();
        int j;
        // Pick random promise images to draw.
        fFuzz->nextRange(&j, 0, kPromiseImageCount - 1);
        fPromiseImages[j].fDrawn = true;
        canvas->drawImage(fPromiseImages[j].fImage, xOffset, 0);
    }
    sk_sp<GrDeferredDisplayList> ddl = recorder.detach();
    fGpuTaskGroup.add([ddl{std::move(ddl)}, this] {
        bool success = skgpu::ganesh::DrawDDL(fSurface, std::move(ddl));
        if (!success) {
            fFuzz->signalBug();
        }
    });
}
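// The recording threads only record and enqueue: playback of each detached DDL is pushed onto
// the GPU task group, so DrawDDL, and with it the fulfill/release callbacks whose thread checks
// appear above, runs on the single GPU thread.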

void DDLFuzzer::run() {
    if (!fSurface) {
        return;
    }
    fRecordingTaskGroup.batch(kIterationCount, [this](int i) { this->recordAndPlayDDL(); });
    fRecordingTaskGroup.wait();

    fGpuTaskGroup.add([this] { fContext->flushAndSubmit(fSurface.get(), GrSyncCpu::kYes); });

    fGpuTaskGroup.wait();

    fGpuTaskGroup.add([this] {
        while (!fReusableTextures.empty()) {
            sk_sp<GrPromiseImageTexture> gpuTexture = std::move(fReusableTextures.front());
            fContext->deleteBackendTexture(gpuTexture->backendTexture());
            fReusableTextures.pop();
        }
        fContextFactory.destroyContexts();
        // TODO: Release promise images not on the GPU thread.
        fPromiseImages.reset(0);
    });
    fGpuTaskGroup.wait();
}

DEF_FUZZ(DDLThreadingGL, fuzz) {
    DDLFuzzer(fuzz, skgpu::ContextType::kGL).run();
}
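// How this is driven (a sketch, with assumptions beyond what this file shows): DEF_FUZZ, defined
// in fuzz/Fuzz.h, registers the body above under the name "DDLThreadingGL" so that Skia's fuzz
// driver can look it up, wrap the raw input bytes in a Fuzz object, and invoke it; the Fuzz
// object then doles those bytes out through the next()/nextRange() calls used throughout
// this file.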