GrResourceCache.cpp
/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <atomic>
#include <vector>

#include "src/base/SkRandom.h"
#include "src/base/SkTSort.h"
#include "src/gpu/ganesh/SkGr.h"

using namespace skia_private;

DECLARE_SKMESSAGEBUS_MESSAGE(GrResourceCache::UnrefResourceMessage,
                             GrDirectContext::DirectContextID,
                             /*AllowCopyableMessage=*/false)

#define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)

//////////////////////////////////////////////////////////////////////////////

class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};

//////////////////////////////////////////////////////////////////////////////

GrResourceCache::GrResourceCache(skgpu::SingleOwner* singleOwner,
                                 GrDirectContext::DirectContextID owningContextID,
                                 uint32_t familyID)
        : fInvalidUniqueKeyInbox(familyID)
        , fUnrefResourceInbox(owningContextID)
        , fOwningContextID(owningContextID)
        , fContextUniqueID(familyID)
        , fSingleOwner(singleOwner) {
    SkASSERT(owningContextID.isValid());
    SkASSERT(familyID != SK_InvalidUniqueID);
}

void GrResourceCache::setLimit(size_t bytes) {
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}

void GrResourceCache::insertResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    SkASSERT(!resource->cacheAccess().isUsableAsScratch());
    this->purgeAsNeeded();
}

void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    if (resource->resourcePriv().isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}

void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    while (!fNonpurgeableResources.empty()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    fThreadSafeCache->dropAllRefs();

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
}

void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    fThreadSafeCache->dropAllRefs();

    this->processFreedGpuResources();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    SkASSERT(fThreadSafeCache); // better have called setThreadSafeCache too

    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    while (!fNonpurgeableResources.empty()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
}

void GrResourceCache::refResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(resource->getContext()->priv().getResourceCache() == this);
    if (resource->cacheAccess().hasRef()) {
        resource->ref();
    } else {
        this->refAndMakeResourceMRU(resource);
    }
    this->validate();
}

GrGpuResource* GrResourceCache::findAndRefScratchResource(const skgpu::ScratchKey& scratchKey) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource = fScratchMap.find(scratchKey);
    if (resource) {
        fScratchMap.remove(scratchKey, resource);
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}

void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}

void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}

void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const skgpu::UniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap. The isUsableAsScratch call depends on us not adding the new
            // unique key until after this check.
            if (resource->cacheAccess().isUsableAsScratch()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}

void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}

void GrResourceCache::notifyARefCntReachedZero(GrGpuResource* resource,
                                               GrGpuResource::LastRemovedRef removedRef) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
    }

    if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
        this->validate();
        return;
    }

#ifdef SK_DEBUG
    // When the timestamp overflows validate() is called. validate() checks that resources in
    // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
    // the purgeable queue happens just below in this function. So we mark it as an exception.
    if (resource->resourcePriv().isPurgeable()) {
        fNewlyPurgeableResourceForValidation = resource;
    }
#endif
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);

    if (!resource->resourcePriv().isPurgeable() &&
        resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fNumBudgetedResourcesFlushWillMakePurgeable;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (this->wouldFit(resource->gpuMemorySize())) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}

void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
        this->purgeAsNeeded();
    } else {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}

void GrResourceCache::purgeAsNeeded() {
    TArray<skgpu::UniqueKeyInvalidatedMessage> invalidKeyMsgs;
    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
    if (!invalidKeyMsgs.empty()) {
        SkASSERT(fProxyProvider);

        for (int i = 0; i < invalidKeyMsgs.size(); ++i) {
            if (invalidKeyMsgs[i].inThreadSafeCache()) {
                fThreadSafeCache->remove(invalidKeyMsgs[i].key());
                SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key()));
            } else {
                fProxyProvider->processInvalidUniqueKey(
                        invalidKeyMsgs[i].key(), nullptr,
                        GrProxyProvider::InvalidateGPUResource::kYes);
                SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
            }
        }
    }

    this->processFreedGpuResources();

    bool stillOverbudget = this->overBudget();
    while (stillOverbudget && fPurgeableQueue.count()) {
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->resourcePriv().isPurgeable());
        resource->cacheAccess().release();
        stillOverbudget = this->overBudget();
    }

    if (stillOverbudget) {
        fThreadSafeCache->dropUniqueRefs(this);

        stillOverbudget = this->overBudget();
        while (stillOverbudget && fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();
            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
            stillOverbudget = this->overBudget();
        }
    }

    this->validate();
}
void GrResourceCache::purgeUnlockedResources(const skgpu::StdSteadyClock::time_point* purgeTime,
                                             GrPurgeResourceOptions opts) {
    if (opts == GrPurgeResourceOptions::kAllResources) {
        if (purgeTime) {
            fThreadSafeCache->dropUniqueRefsOlderThan(*purgeTime);
        } else {
            fThreadSafeCache->dropUniqueRefs(nullptr);
        }

        // We could disable maintaining the heap property here, but it would add a lot of
        // complexity. Moreover, this is rarely called.
        while (fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();

            const skgpu::StdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // Resources were given both LRU timestamps and tagged with a frame number when
                // they first became purgeable. The LRU timestamp won't change again until the
                // resource is made non-purgeable again. So, at this point all the remaining
                // resources in the timestamp-sorted queue will have a frame number >= to this
                // one.
                break;
            }

            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        // Early out if the very first item is too new to purge to avoid sorting the queue when
        // nothing will be deleted.
        if (purgeTime && fPurgeableQueue.count() &&
            fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
            return;
        }

        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);

            const skgpu::StdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // scratch or not, all later iterations will be too recently used to purge.
                break;
            }
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.size(); i++) {
            scratchResources[i]->cacheAccess().release();
        }
    }

    this->validate();
}

bool GrResourceCache::purgeToMakeHeadroom(size_t desiredHeadroomBytes) {
    AutoValidate av(this);
    if (desiredHeadroomBytes > fMaxBytes) {
        return false;
    }
    if (this->wouldFit(desiredHeadroomBytes)) {
        return true;
    }
    fPurgeableQueue.sort();

    size_t projectedBudget = fBudgetedBytes;
    int purgeCnt = 0;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
            projectedBudget -= resource->gpuMemorySize();
        }
        if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
            purgeCnt = i + 1;
            break;
        }
    }
    if (purgeCnt == 0) {
        return false;
    }

    // Success! Release the resources.
    // Copy to array first so we don't mess with the queue.
    std::vector<GrGpuResource*> resources;
    resources.reserve(purgeCnt);
    for (int i = 0; i < purgeCnt; i++) {
        resources.push_back(fPurgeableQueue.at(i));
    }
    for (GrGpuResource* resource : resources) {
        resource->cacheAccess().release();
    }
    return true;
}

void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {

    const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge);
    bool stillOverbudget = tmpByteBudget < fBytes;

    if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        size_t scratchByteCount = 0;
        for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
                scratchByteCount += resource->gpuMemorySize();
                stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.size(); i++) {
            scratchResources[i]->cacheAccess().release();
        }
        stillOverbudget = tmpByteBudget < fBytes;

        this->validate();
    }

    // Purge any remaining resources in LRU order
    if (stillOverbudget) {
        const size_t cachedByteCount = fMaxBytes;
        fMaxBytes = tmpByteBudget;
        this->purgeAsNeeded();
        fMaxBytes = cachedByteCount;
    }
}

bool GrResourceCache::requestsFlush() const {
    return this->overBudget() && !fPurgeableQueue.count() &&
           fNumBudgetedResourcesFlushWillMakePurgeable > 0;
}

void GrResourceCache::processFreedGpuResources() {
    TArray<UnrefResourceMessage> msgs;
    fUnrefResourceInbox.poll(&msgs);
    // We don't need to do anything other than let the messages delete themselves and call unref.
}

void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
    int index = fNonpurgeableResources.size();
    *fNonpurgeableResources.append() = resource;
    *resource->cacheAccess().accessCacheIndex() = index;
}

void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop_back();
    SkDEBUGCODE(*index = -1);
}

uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.reserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.size() &&
                   currNP < fNonpurgeableResources.size()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.size()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.size()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.size(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}

void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}

#if GR_CACHE_STATS
void GrResourceCache::getStats(Stats* stats) const {
    stats->reset();

    stats->fTotal = this->getResourceCount();
    stats->fNumNonPurgeable = fNonpurgeableResources.size();
    stats->fNumPurgeable = fPurgeableQueue.count();

    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        stats->update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        stats->update(fPurgeableQueue.at(i));
    }
}

#if defined(GR_TEST_UTILS)
void GrResourceCache::dumpStats(SkString* out) const {
    this->validate();

    Stats stats;

    this->getStats(&stats);

    float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;

    out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
    out->appendf("\t\tEntry Count: current %d"
                 " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
                 stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
                 stats.fScratch, fHighWaterCount);
    out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
                 SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
                 SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
}

void GrResourceCache::dumpStatsKeyValuePairs(TArray<SkString>* keys,
                                             TArray<double>* values) const {
    this->validate();

    Stats stats;
    this->getStats(&stats);

    keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
}
#endif // defined(GR_TEST_UTILS)
#endif // GR_CACHE_STATS

#ifdef SK_DEBUG
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const skgpu::ScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const skgpu::UniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isUsableAsScratch()) {
                SkASSERT(!uniqueKey.isValid());
                SkASSERT(!resource->cacheAccess().hasRef());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
            } else if (scratchKey.isValid()) {
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid() || resource->cacheAccess().hasRef());
                SkASSERT(!fScratchMap->has(resource, scratchKey));
            }
            if (uniqueKey.isValid()) {
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        int count = 0;
        fScratchMap.foreach([&](const GrGpuResource& resource) {
            SkASSERT(resource.cacheAccess().isUsableAsScratch());
            count++;
        });
        SkASSERT(count == fScratchMap.count());
    }

    Stats stats(this);
    size_t purgeableBytes = 0;
    int numBudgetedResourcesFlushWillMakePurgeable = 0;

    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
            fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
            ++numBudgetedResourcesFlushWillMakePurgeable;
        }
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
             numBudgetedResourcesFlushWillMakePurgeable);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}

bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
    int index = *resource->cacheAccess().accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.size() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#endif // SK_DEBUG

#if defined(GR_TEST_UTILS)

int GrResourceCache::countUniqueKeysWithTag(const char* tag) const {
    int count = 0;
    fUniqueHash.foreach([&](const GrGpuResource& resource){
        if (0 == strcmp(tag, resource.getUniqueKey().tag())) {
            ++count;
        }
    });
    return count;
}

void GrResourceCache::changeTimestamp(uint32_t newTimestamp) {
    fTimestamp = newTimestamp;
}

void GrResourceCache::visitSurfaces(
        const std::function<void(const GrSurface*, bool purgeable)>& func) const {

    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        if (const GrSurface* surf = fNonpurgeableResources[i]->asSurface()) {
            func(surf, /* purgeable= */ false);
        }
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        if (const GrSurface* surf = fPurgeableQueue.at(i)->asSurface()) {
            func(surf, /* purgeable= */ true);
        }
    }
}

#endif // defined(GR_TEST_UTILS)
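
The functions above are the cache's internal, single-owner implementation; application code normally reaches them through GrDirectContext's public wrappers. The following sketch is illustrative only and is not part of GrResourceCache.cpp: it assumes an already-initialized Ganesh GrDirectContext (called ctx here) and that the wrapper names and header paths used below match the Skia headers bundled with this engine, so verify them against include/gpu/GrDirectContext.h before relying on them.

#include <chrono>

#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrTypes.h"  // GrPurgeResourceOptions (path assumed)

// Hypothetical helper: drive the resource cache through GrDirectContext.
// 'ctx' is assumed to be a valid, fully initialized GrDirectContext owned by the caller.
void tuneGaneshResourceCache(GrDirectContext* ctx) {
    // Cap the budget at 64 MiB; this forwards to GrResourceCache::setLimit(),
    // which immediately calls purgeAsNeeded() to enforce the new limit.
    ctx->setResourceCacheLimit(64 * 1024 * 1024);

    // Inspect how much the cache currently holds (budgeted and unbudgeted resources).
    int resourceCount = 0;
    size_t resourceBytes = 0;
    ctx->getResourceCacheUsage(&resourceCount, &resourceBytes);

    // Release everything that currently has no outstanding refs, i.e. the purgeable queue.
    ctx->purgeUnlockedResources(GrPurgeResourceOptions::kAllResources);

    // Or only drop resources that have sat unused for at least five seconds, which maps
    // onto the time_point-based purgeUnlockedResources() overload in the listing above.
    ctx->performDeferredCleanup(std::chrono::seconds(5));
}

A typical embedder calls the size/limit setters once at startup and the purge entry points on memory-pressure signals or when the surface goes to the background, leaving per-frame eviction to purgeAsNeeded().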