GrResourceAllocator.cpp
/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifdef SK_DEBUG
#include <atomic>

uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}

uint32_t GrResourceAllocator::Register::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}
#endif

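// The allocator must be fully drained (or have recorded a failed instantiation) before it is
// destroyed.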
GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fFailedInstantiation || fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}

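// Records that 'proxy' is used over the inclusive op-index range [start, end]. Repeated calls for
// the same proxy extend its existing interval; read-only and skippable proxies never get one.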
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse, AllowRecycling allowRecycling
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);  // We shouldn't be adding any intervals after (or during) assignment

    if (proxy->canSkipResourceAllocator()) {
        return;
    }

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        auto resourceProvider = fDContext->priv().resourceProvider();
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(resourceProvider)) {
            fFailedInstantiation = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign().
            // So it must already be instantiated or it must be a lazy proxy that we instantiated
            // above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    uint32_t proxyID = proxy->uniqueID().asUInt();
    if (Interval** intvlPtr = fIntvlHash.find(proxyID)) {
        // Revise the interval for an existing use
        Interval* intvl = *intvlPtr;
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            // This interval is for the initial upload to a deferred proxy. Due to the vagaries
            // of how deferred proxies are collected they can appear as uploads multiple times
            // in a single opsTask's list and as uploads in several opsTasks.
            SkASSERT(0 == intvl->start());
        } else if (isDirectDstRead) {
            // Direct reads from the render target itself should occur w/in the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        if (allowRecycling == AllowRecycling::kNo) {
            // In this case, a preexisting interval is made non-reusable since its proxy is
            // sampled into a secondary command buffer.
            intvl->disallowRecycling();
        }
        intvl->extendEnd(end);
        return;
    }
    Interval* newIntvl = fInternalAllocator.make<Interval>(proxy, start, end);

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    if (allowRecycling == AllowRecycling::kNo) {
        newIntvl->disallowRecycling();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.set(proxyID, newIntvl);
}

// Tragically we have cases where we always have to make new textures.
static bool can_proxy_use_scratch(const GrCaps& caps, GrSurfaceProxy* proxy) {
    return caps.reuseScratchTextures() || proxy->asRenderTargetProxy();
}

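// A Register tracks the surface that will back one or more proxies. Construction eagerly looks
// for a reusable surface: a cached scratch texture matching 'scratchKey', or, for uniquely keyed
// proxies, the surface already associated with that unique key.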
GrResourceAllocator::Register::Register(GrSurfaceProxy* originatingProxy,
                                        skgpu::ScratchKey scratchKey,
                                        GrResourceProvider* provider)
        : fOriginatingProxy(originatingProxy)
        , fScratchKey(std::move(scratchKey)) {
    SkASSERT(originatingProxy);
    SkASSERT(!originatingProxy->isInstantiated());
    SkASSERT(!originatingProxy->isLazy());
    SkDEBUGCODE(fUniqueID = CreateUniqueID();)
    if (fScratchKey.isValid()) {
        if (can_proxy_use_scratch(*provider->caps(), originatingProxy)) {
            fExistingSurface = provider->findAndRefScratchTexture(
                    fScratchKey, /*label=*/"ResourceAllocatorRegister");
        }
    } else {
        SkASSERT(this->uniqueKey().isValid());
        fExistingSurface = provider->findByUniqueKey<GrSurface>(this->uniqueKey());
    }
}

bool GrResourceAllocator::Register::isRecyclable(const GrCaps& caps,
                                                 GrSurfaceProxy* proxy,
                                                 int knownUseCount,
                                                 AllowRecycling allowRecycling) const {
    if (allowRecycling == AllowRecycling::kNo) {
        return false;
    }

    if (!can_proxy_use_scratch(caps, proxy)) {
        return false;
    }

    if (!this->scratchKey().isValid()) {
        return false; // no scratch key, no free pool
    }
    if (this->uniqueKey().isValid()) {
        return false; // rely on the resource cache to hold onto uniquely-keyed surfaces.
    }
    // If all the refs on the proxy are known to the resource allocator then no one
    // should be holding onto it outside of Ganesh.
    return !proxy->refCntGreaterThan(knownUseCount);
}

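// Attaches a surface to 'proxy': the register's pre-existing surface if one was found, a newly
// created surface for the originating proxy, or the originating proxy's surface for any other
// proxy sharing this register.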
bool GrResourceAllocator::Register::instantiateSurface(GrSurfaceProxy* proxy,
                                                       GrResourceProvider* resourceProvider) {
    SkASSERT(!proxy->peekSurface());

    sk_sp<GrSurface> newSurface;
    if (!fExistingSurface) {
        if (proxy == fOriginatingProxy) {
            newSurface = proxy->priv().createSurface(resourceProvider);
        } else {
            newSurface = sk_ref_sp(fOriginatingProxy->peekSurface());
        }
    }
    if (!fExistingSurface && !newSurface) {
        return false;
    }

    GrSurface* surface = newSurface ? newSurface.get() : fExistingSurface.get();
    // Make surface budgeted if this proxy is budgeted.
    if (skgpu::Budgeted::kYes == proxy->isBudgeted() &&
        GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
        // This gets the job done but isn't quite correct. It would be better to try to
        // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
        surface->resourcePriv().makeBudgeted();
    }

    // Propagate the proxy unique key to the surface if we have one.
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (!surface->getUniqueKey().isValid()) {
            resourceProvider->assignUniqueKeyToResource(uniqueKey, surface);
        }
        SkASSERT(surface->getUniqueKey() == uniqueKey);
    }
    proxy->priv().assign(fExistingSurface ? fExistingSurface : std::move(newSurface));
    return true;
}

GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}

// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif

// First try to reuse one of the recently allocated/used registers in the free pool.
GrResourceAllocator::Register* GrResourceAllocator::findOrCreateRegisterFor(GrSurfaceProxy* proxy) {
    auto resourceProvider = fDContext->priv().resourceProvider();
    // Handle uniquely keyed proxies
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (auto p = fUniqueKeyRegisters.find(uniqueKey)) {
            return *p;
        }
        // No need for a scratch key. These don't go in the free pool.
        Register* r = fInternalAllocator.make<Register>(proxy,
                                                        skgpu::ScratchKey(),
                                                        resourceProvider);
        fUniqueKeyRegisters.set(uniqueKey, r);
        return r;
    }

    // Then look in the free pool
    skgpu::ScratchKey scratchKey;
    proxy->priv().computeScratchKey(*fDContext->priv().caps(), &scratchKey);

    auto filter = [] (const Register* r) {
        return true;
    };
    if (Register* r = fFreePool.findAndRemove(scratchKey, filter)) {
        return r;
    }

    return fInternalAllocator.make<Register>(proxy, std::move(scratchKey), resourceProvider);
}

// Remove any intervals that end before the current index. Add their registers
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* intvl = fActiveIntvls.popHead();
        SkASSERT(!intvl->next());

        Register* r = intvl->getRegister();
        if (r && r->isRecyclable(*fDContext->priv().caps(), intvl->proxy(), intvl->uses(),
                                 intvl->allowRecycling())) {
#if GR_ALLOCATION_SPEW
            SkDebugf("putting register %d back into pool\n", r->uniqueID());
#endif
            // TODO: fix this insertion so we get a more LRU-ish behavior
            fFreePool.insert(r->scratchKey(), r);
        }
        fFinishedIntvls.insertByIncreasingStart(intvl);
    }
}

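// Sweeps the interval list in order of increasing start index, expiring intervals whose end has
// passed and assigning a Register to each remaining proxy. Fully-lazy proxies are instantiated
// here; all other instantiation is deferred to assign().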
bool GrResourceAllocator::planAssignment() {
    fIntvlHash.reset(); // we don't need the interval hash anymore

    SkASSERT(!fPlanned && !fAssigned);
    SkDEBUGCODE(fPlanned = true;)

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning %d ops\n", fNumOps);
    this->dumpIntervals();
#endif

    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fIntvlList.popHead()) {
        this->expire(cur->start());
        fActiveIntvls.insertByIncreasingEnd(cur);

        // Already-instantiated proxies and lazy proxies don't use registers.
        if (cur->proxy()->isInstantiated()) {
            continue;
        }

        // Instantiate fully-lazy proxies immediately. Ignore other lazy proxies at this stage.
        if (cur->proxy()->isLazy()) {
            if (cur->proxy()->isFullyLazy()) {
                fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
                if (fFailedInstantiation) {
                    break;
                }
            }
            continue;
        }

        Register* r = this->findOrCreateRegisterFor(cur->proxy());
#if GR_ALLOCATION_SPEW
        SkDebugf("Assigning register %d to %d\n",
                 r->uniqueID(),
                 cur->proxy()->uniqueID().asUInt());
#endif
        SkASSERT(!cur->proxy()->peekSurface());
        cur->setRegister(r);
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return !fFailedInstantiation;
}

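// Estimates how much GPU memory the planned-but-uninstantiated budgeted proxies will require and
// asks the resource cache to purge enough to make that much headroom.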
bool GrResourceAllocator::makeBudgetHeadroom() {
    SkASSERT(fPlanned);
    SkASSERT(!fFailedInstantiation);
    size_t additionalBytesNeeded = 0;
    for (Interval* cur = fFinishedIntvls.peekHead(); cur; cur = cur->next()) {
        GrSurfaceProxy* proxy = cur->proxy();
        if (skgpu::Budgeted::kNo == proxy->isBudgeted() || proxy->isInstantiated()) {
            continue;
        }

        // N.B. Fully-lazy proxies were already instantiated in planAssignment
        if (proxy->isLazy()) {
            additionalBytesNeeded += proxy->gpuMemorySize();
        } else {
            Register* r = cur->getRegister();
            SkASSERT(r);
            if (!r->accountedForInBudget() && !r->existingSurface()) {
                additionalBytesNeeded += proxy->gpuMemorySize();
            }
            r->setAccountedForInBudget();
        }
    }
    return fDContext->priv().getResourceCache()->purgeToMakeHeadroom(additionalBytesNeeded);
}

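// Returns the interval and register bookkeeping to its initial state so the allocator can be
// reused. (The failed-instantiation flag is deliberately preserved; see the note below.)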
void GrResourceAllocator::reset() {
    // NOTE: We do not reset the failedInstantiation flag because we currently do not attempt
    // to recover from failed instantiations. The user is responsible for checking this flag and
    // bailing early.
    SkDEBUGCODE(fPlanned = false;)
    SkDEBUGCODE(fAssigned = false;)
    SkASSERT(fActiveIntvls.empty());
    fFinishedIntvls = IntervalList();
    fIntvlList = IntervalList();
    fIntvlHash.reset();
    fUniqueKeyRegisters.reset();
    fFreePool.reset();
    fInternalAllocator.reset();
}

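// Instantiates a surface for every finished interval's proxy: lazy proxies directly, all others
// through their assigned Register. Stops and returns false on the first failure.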
bool GrResourceAllocator::assign() {
    if (fFailedInstantiation) {
        return false;
    }
    SkASSERT(fPlanned && !fAssigned);
    SkDEBUGCODE(fAssigned = true;)
    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fFinishedIntvls.popHead()) {
        if (fFailedInstantiation) {
            break;
        }
        if (cur->proxy()->isInstantiated()) {
            continue;
        }
        if (cur->proxy()->isLazy()) {
            fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
            continue;
        }
        Register* r = cur->getRegister();
        SkASSERT(r);
        fFailedInstantiation = !r->instantiateSurface(cur->proxy(), resourceProvider);
    }
    return !fFailedInstantiation;
}

#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - refProxys:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif
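
// A minimal usage sketch, inferred from the assertions in this file rather than copied from it:
// intervals are added first, then planAssignment(), optionally makeBudgetHeadroom(), and finally
// assign(); reset() readies the allocator for reuse. The construction shown and the exact driving
// code (normally Ganesh's drawing manager) are assumptions, not part of this listing.
//
//     GrResourceAllocator alloc(dContext);   // hypothetical construction
//     alloc.addInterval(proxy, firstOpIndex, lastOpIndex,
//                       GrResourceAllocator::ActualUse::kYes,
//                       GrResourceAllocator::AllowRecycling::kYes);
//     if (alloc.planAssignment() && alloc.makeBudgetHeadroom() && alloc.assign()) {
//         // every non-lazy proxy with an interval now has a GrSurface attached
//     }
//     alloc.reset();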