ResourceCache.cpp
/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/ResourceCache.h"

#include "include/private/base/SingleOwner.h"
#include "src/base/SkRandom.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/graphite/GraphiteResourceKey.h"
#include "src/gpu/graphite/ProxyCache.h"
#include "src/gpu/graphite/Resource.h"

#if defined(GRAPHITE_TEST_UTILS)
#include "src/gpu/graphite/Texture.h"
#endif
namespace skgpu::graphite {

#define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)
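
// Creates the cache. When recorderID is a valid generation ID (i.e. not SK_InvalidGenID), the
// cache also creates a ProxyCache tied to that recorder (see the constructor below).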
sk_sp<ResourceCache> ResourceCache::Make(SingleOwner* singleOwner,
                                         uint32_t recorderID,
                                         size_t maxBytes) {
    return sk_sp<ResourceCache>(new ResourceCache(singleOwner, recorderID, maxBytes));
}

ResourceCache::ResourceCache(SingleOwner* singleOwner, uint32_t recorderID, size_t maxBytes)
        : fMaxBytes(maxBytes)
        , fSingleOwner(singleOwner) {
    if (recorderID != SK_InvalidGenID) {
        fProxyCache = std::make_unique<ProxyCache>(recorderID);
    }
    // TODO: Maybe once things start using the ResourceCache the compiler will, as in Ganesh, stop
    // complaining about fSingleOwner being unused in release builds, and we can delete this.
#ifndef SK_DEBUG
    (void)fSingleOwner;
#endif
}

ResourceCache::~ResourceCache() {
    // The ResourceCache must have been shut down by the ResourceProvider before it is destroyed.
    SkASSERT(fIsShutdown);
}

void ResourceCache::shutdown() {
    ASSERT_SINGLE_OWNER

    SkASSERT(!fIsShutdown);

    {
        SkAutoMutexExclusive locked(fReturnMutex);
        fIsShutdown = true;
    }
    if (fProxyCache) {
        fProxyCache->purgeAll();
    }

    this->processReturnedResources();

    while (fNonpurgeableResources.size()) {
        Resource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        this->removeFromNonpurgeableArray(back);
        back->unrefCache();
    }

    while (fPurgeableQueue.count()) {
        Resource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        this->removeFromPurgeableQueue(top);
        top->unrefCache();
    }

    TRACE_EVENT_INSTANT0("skia.gpu.cache", "shutdown", TRACE_EVENT_SCOPE_THREAD);
}

void ResourceCache::insertResource(Resource* resource) {
    ASSERT_SINGLE_OWNER

    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->isPurgeable());
    SkASSERT(resource->key().isValid());
    // All resources in the cache are owned. If we track wrapped resources in the cache we'll need
    // to update this check.
    SkASSERT(resource->ownership() == Ownership::kOwned);

    // The reason to call processReturnedResources here is to get an accurate accounting of our
    // memory usage, as some resources can go from unbudgeted to budgeted when they return. So we
    // want to have them all returned before adding the budget for the new resource in case we need
    // to purge things. However, if the new resource has a memory size of 0, we just skip returning
    // resources (which has overhead for each call) since the new resource won't affect whether
    // we're over or under budget.
    if (resource->gpuMemorySize() > 0) {
        this->processReturnedResources();
    }

    resource->registerWithCache(sk_ref_sp(this));
    resource->refCache();

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    this->setResourceTimestamp(resource, this->getNextTimestamp());
    resource->updateAccessTime();

    this->addToNonpurgeableArray(resource);

    SkDEBUGCODE(fCount++;)

    if (resource->key().shareable() == Shareable::kYes) {
        fResourceMap.insert(resource->key(), resource);
    }

    if (resource->budgeted() == skgpu::Budgeted::kYes) {
        fBudgetedBytes += resource->gpuMemorySize();
    }

    this->purgeAsNeeded();
}
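
// Looks up a resource by key. On a hit, the resource receives a usage ref and is moved to the
// most-recently-used position; a non-shareable (scratch) hit is also removed from fResourceMap so
// it cannot be handed out again while in use.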
Resource* ResourceCache::findAndRefResource(const GraphiteResourceKey& key,
                                            skgpu::Budgeted budgeted) {
    ASSERT_SINGLE_OWNER

    SkASSERT(key.isValid());

    Resource* resource = fResourceMap.find(key);
    if (!resource) {
        // The main reason to call processReturnedResources in this call is to see if there are any
        // resources that we could match with the key. However, there is overhead in calling it.
        // So we only call it if we first failed to find a matching resource.
        if (this->processReturnedResources()) {
            resource = fResourceMap.find(key);
        }
    }
    if (resource) {
        // All resources we pull out of the cache for use should be budgeted.
        SkASSERT(resource->budgeted() == skgpu::Budgeted::kYes);
        if (key.shareable() == Shareable::kNo) {
            // If a resource is not shareable (i.e. a scratch resource) we remove it from the map
            // so that it isn't found again.
            fResourceMap.remove(key, resource);
            if (budgeted == skgpu::Budgeted::kNo) {
                resource->makeUnbudgeted();
                fBudgetedBytes -= resource->gpuMemorySize();
            }
            SkDEBUGCODE(resource->fNonShareableInCache = false;)
        } else {
            // Shareable resources should never be requested as non-budgeted.
            SkASSERT(budgeted == skgpu::Budgeted::kYes);
        }
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }

    // processReturnedResources may have added resources back into our budget if they were being
    // used by an SkImage or SkSurface previously. However, instead of calling purgeAsNeeded in
    // processReturnedResources, we delay calling it until now so we don't end up purging a resource
    // we're looking for in this function.
    //
    // We could avoid calling this if we didn't return any resources from processReturnedResources.
    // However, when we're not overbudget purgeAsNeeded is very cheap. When overbudget there may be
    // some really niche usage patterns that could cause us to never actually return resources to
    // the cache, but still be overbudget due to shared resources. So to be safe we just always call
    // it here.
    this->purgeAsNeeded();

    return resource;
}

void ResourceCache::refAndMakeResourceMRU(Resource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(this->isInCache(resource));

    if (this->inPurgeableQueue(resource)) {
        // It's about to become unpurgeable.
        this->removeFromPurgeableQueue(resource);
        this->addToNonpurgeableArray(resource);
    }
    resource->initialUsageRef();

    this->setResourceTimestamp(resource, this->getNextTimestamp());
    this->validate();
}
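
// Called when a Resource drops its last usage or command-buffer ref. Queues the resource to be
// returned to the cache on the next processReturnedResources() call. Returns false if the cache
// has already been shut down, in which case the resource is not queued.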
bool ResourceCache::returnResource(Resource* resource, LastRemovedRef removedRef) {
    // We should never be trying to return a LastRemovedRef of kCache.
    SkASSERT(removedRef != LastRemovedRef::kCache);
    SkAutoMutexExclusive locked(fReturnMutex);
    if (fIsShutdown) {
        return false;
    }

    SkASSERT(resource);

    // When a non-shareable resource's CB and usage refs are both zero, give it a chance to prepare
    // itself to be reused. On Dawn/WebGPU we use this to remap kXferCpuToGpu buffers asynchronously
    // so that they are already mapped before they come out of the cache again.
    if (resource->shouldDeleteASAP() == Resource::DeleteASAP::kNo &&
        resource->key().shareable() == Shareable::kNo &&
        removedRef == LastRemovedRef::kUsage) {
        resource->prepareForReturnToCache([resource] { resource->initialUsageRef(); });
        // Check if the resource was re-ref'ed. In that case exit without adding it to the queue.
        if (resource->hasUsageRef()) {
            return true;
        }
    }

    // We only allow one instance of a Resource to be in the return queue at a time. We do this so
    // that the ReturnQueue stays small and quick to process.
    //
    // Because we take CacheRefs to all Resources added to the ReturnQueue, we would be safe if we
    // decided to have multiple instances of a Resource. Even if an earlier returned instance of a
    // Resource triggers that Resource to get purged from the cache, the Resource itself wouldn't
    // get deleted until we drop all the CacheRefs in this ReturnQueue.
    if (*resource->accessReturnIndex() >= 0) {
        // If the resource is already in the return queue we promote the LastRemovedRef to be
        // kUsage if that is what is returned here.
        if (removedRef == LastRemovedRef::kUsage) {
            SkASSERT(*resource->accessReturnIndex() < (int)fReturnQueue.size());
            fReturnQueue[*resource->accessReturnIndex()].second = removedRef;
        }
        return true;
    }
#ifdef SK_DEBUG
    for (auto& nextResource : fReturnQueue) {
        SkASSERT(nextResource.first != resource);
    }
#endif

    fReturnQueue.push_back(std::make_pair(resource, removedRef));
    *resource->accessReturnIndex() = fReturnQueue.size() - 1;
    resource->refCache();
    return true;
}
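
// Drains the return queue, moving each returned resource back into the cache's bookkeeping.
// Returns true if at least one resource was processed.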
bool ResourceCache::processReturnedResources() {
    // We need to move the returned Resources off of the ReturnQueue before we start processing them
    // so that we can drop the fReturnMutex. When we process a Resource we may need to grab its
    // UnrefMutex. This could cause a deadlock if on another thread the Resource has the UnrefMutex
    // and is waiting on the ReturnMutex to be free.
    ReturnQueue tempQueue;
    {
        SkAutoMutexExclusive locked(fReturnMutex);
        // TODO: Instead of doing a copy of the vector, we may be able to improve the performance
        // here by storing some form of linked list, then just moving the pointer to the first
        // element and resetting the ReturnQueue's top element to nullptr.
        tempQueue = fReturnQueue;
        fReturnQueue.clear();
        for (auto& nextResource : tempQueue) {
            auto [resource, ref] = nextResource;
            SkASSERT(*resource->accessReturnIndex() >= 0);
            *resource->accessReturnIndex() = -1;
        }
    }

    if (tempQueue.empty()) {
        return false;
    }

    // Trace after the lock has been released so we can simply record the tempQueue size.
    TRACE_EVENT1("skia.gpu.cache", TRACE_FUNC, "count", tempQueue.size());

    for (auto& nextResource : tempQueue) {
        auto [resource, ref] = nextResource;
        // We need this check here to handle the following scenario. A Resource is sitting in the
        // ReturnQueue (say from a kUsage last ref) and the Resource still has a command buffer ref
        // out in the wild. When the ResourceCache calls processReturnedResources it locks the
        // ReturnMutex. Immediately after this, the command buffer ref is released on another
        // thread. The Resource cannot be added to the ReturnQueue since the lock is held. Back in
        // the ResourceCache we drop the ReturnMutex, and when we try to return the Resource we
        // will see that it is purgeable. If we are overbudget it is possible that the Resource gets
        // purged from the ResourceCache at this time, setting its cache index to -1. The unrefCache
        // call will actually block here on the Resource's UnrefMutex, which is held from the
        // command buffer ref. Eventually the command buffer ref thread will get to run again, and
        // with the ReturnMutex lock dropped the Resource will get added to the ReturnQueue. At this
        // point the first unrefCache call will continue on the main ResourceCache thread. When we
        // call processReturnedResources the next time, we don't want this Resource added back into
        // the cache, thus we have the check here. The Resource will then get deleted when we call
        // unrefCache below to remove the cache ref added from the ReturnQueue.
        if (*resource->accessCacheIndex() != -1) {
            this->returnResourceToCache(resource, ref);
        }
        // Remove the cache ref held by the ReturnQueue.
        resource->unrefCache();
    }
    return true;
}
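
// Re-integrates a single returned resource: when its last usage ref was dropped, a non-shareable
// resource is re-budgeted and re-inserted into fResourceMap; a resource that is now purgeable is
// moved to the purgeable queue, or purged immediately when marked DeleteASAP.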
void ResourceCache::returnResourceToCache(Resource* resource, LastRemovedRef removedRef) {
    // A resource should not have been destroyed when placed into the return queue. Also, the cache
    // always empties the return queue before purging any resources from the cache itself. When the
    // cache releases/abandons all of its resources, it first invalidates the return queue so no new
    // resources can be added. Thus we should not end up in a situation where a resource gets
    // destroyed after it was added to the return queue.
    SkASSERT(!resource->wasDestroyed());

    SkASSERT(this->isInCache(resource));
    if (removedRef == LastRemovedRef::kUsage) {
        if (resource->key().shareable() == Shareable::kYes) {
            // Shareable resources should still be in the cache.
            SkASSERT(fResourceMap.find(resource->key()));
        } else {
            SkDEBUGCODE(resource->fNonShareableInCache = true;)
            fResourceMap.insert(resource->key(), resource);
            if (resource->budgeted() == skgpu::Budgeted::kNo) {
                resource->makeBudgeted();
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    }

    // If we weren't using multiple threads, it would be ok to assume a resource that isn't
    // purgeable must be in the non-purgeable array. However, since resources can be unreffed from
    // multiple threads, it is possible that a resource became purgeable while we are in the middle
    // of returning resources. For example, a resource could have 1 usage and 1 command buffer ref.
    // We then unref the usage, which puts the resource in the return queue. Then the ResourceCache
    // thread locks the ReturnQueue as it returns the Resource. At this same time another thread
    // unrefs the command buffer usage but can't add the Resource to the ReturnQueue as it is
    // locked (though the command buffer ref has been reduced to zero). When we are processing the
    // Resource (from the kUsage ref) to return it to the cache, it will look like it is purgeable
    // since all refs are zero. Thus we will move the Resource from the non-purgeable to the
    // purgeable queue. Later, when we return the command buffer ref, the Resource will have already
    // been moved to the purgeable queue and we don't need to do it again.
    if (!resource->isPurgeable() || this->inPurgeableQueue(resource)) {
        this->validate();
        return;
    }

    this->setResourceTimestamp(resource, this->getNextTimestamp());

    this->removeFromNonpurgeableArray(resource);

    if (resource->shouldDeleteASAP() == Resource::DeleteASAP::kYes) {
        this->purgeResource(resource);
    } else {
        resource->updateAccessTime();
        fPurgeableQueue.insert(resource);
    }
    this->validate();
}
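
// Appends the resource to the non-purgeable array and records its index on the resource itself so
// it can be located and removed in constant time later.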
void ResourceCache::addToNonpurgeableArray(Resource* resource) {
    int index = fNonpurgeableResources.size();
    *fNonpurgeableResources.append() = resource;
    *resource->accessCacheIndex() = index;
}

void ResourceCache::removeFromNonpurgeableArray(Resource* resource) {
    int* index = resource->accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array.
    Resource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->accessCacheIndex() = *index;
    fNonpurgeableResources.pop_back();
    *index = -1;
}

void ResourceCache::removeFromPurgeableQueue(Resource* resource) {
    fPurgeableQueue.remove(resource);
    // SkTDPQueue will set the index back to -1 in debug builds, but we are using the index as a
    // flag for whether the Resource has been purged from the cache or not. So we need to make sure
    // it always gets set.
    *resource->accessCacheIndex() = -1;
}

bool ResourceCache::inPurgeableQueue(Resource* resource) const {
    SkASSERT(this->isInCache(resource));
    int index = *resource->accessCacheIndex();
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    return false;
}
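
// Removes a purgeable resource from the cache's tracking structures, subtracts its size from the
// budget, and drops the cache's ref; the resource is freed once its remaining cache refs (e.g.
// one held by the return queue) are released.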
void ResourceCache::purgeResource(Resource* resource) {
    SkASSERT(resource->isPurgeable());

    TRACE_EVENT_INSTANT1("skia.gpu.cache", TRACE_FUNC, TRACE_EVENT_SCOPE_THREAD,
                         "size", resource->gpuMemorySize());

    fResourceMap.remove(resource->key(), resource);

    if (resource->shouldDeleteASAP() == Resource::DeleteASAP::kNo) {
        SkASSERT(this->inPurgeableQueue(resource));
        this->removeFromPurgeableQueue(resource);
    } else {
        SkASSERT(!this->isInCache(resource));
    }

    fBudgetedBytes -= resource->gpuMemorySize();
    resource->unrefCache();
}

void ResourceCache::purgeAsNeeded() {
    ASSERT_SINGLE_OWNER

    if (this->overbudget() && fProxyCache) {
        fProxyCache->freeUniquelyHeld();

        // After the proxy cache frees resources, we need to return those resources to the cache.
        this->processReturnedResources();
    }
    while (this->overbudget() && fPurgeableQueue.count()) {
        Resource* resource = fPurgeableQueue.peek();
        SkASSERT(!resource->wasDestroyed());
        SkASSERT(fResourceMap.find(resource->key()));

        if (resource->timestamp() == kMaxTimestamp) {
            // If we hit a resource that is at kMaxTimestamp, then we've hit the part of the
            // purgeable queue with all zero-sized resources. We don't want to actually remove
            // those, so we just break here.
            SkASSERT(resource->gpuMemorySize() == 0);
            break;
        }

        this->purgeResource(resource);
    }

    this->validate();
}

void ResourceCache::purgeResourcesNotUsedSince(StdSteadyClock::time_point purgeTime) {
    ASSERT_SINGLE_OWNER
    this->purgeResources(&purgeTime);
}

void ResourceCache::purgeResources() {
    ASSERT_SINGLE_OWNER
    this->purgeResources(nullptr);
}
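
// Shared implementation for the public purge calls above: purges every purgeable, non-zero-sized
// resource, or only those last used before *purgeTime when purgeTime is non-null.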
void ResourceCache::purgeResources(const StdSteadyClock::time_point* purgeTime) {
    TRACE_EVENT0("skia.gpu.cache", TRACE_FUNC);
    if (fProxyCache) {
        fProxyCache->purgeProxiesNotUsedSince(purgeTime);
    }
    this->processReturnedResources();

    // Early out if the very first item is too new to purge, to avoid sorting the queue when
    // nothing will be deleted.
    if (fPurgeableQueue.count() &&
        purgeTime &&
        fPurgeableQueue.peek()->lastAccessTime() >= *purgeTime) {
        return;
    }

    // Sort the queue.
    fPurgeableQueue.sort();

    // Make a list of the non-zero-sized resources to delete.
    SkTDArray<Resource*> nonZeroSizedResources;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        Resource* resource = fPurgeableQueue.at(i);

        const skgpu::StdSteadyClock::time_point resourceTime = resource->lastAccessTime();
        if (purgeTime && resourceTime >= *purgeTime) {
            // Scratch or not, all later iterations will be too recently used to purge.
            break;
        }
        SkASSERT(resource->isPurgeable());
        if (resource->gpuMemorySize() > 0) {
            *nonZeroSizedResources.append() = resource;
        }
    }

    // Delete the collected resources. This must be done as a separate pass
    // to avoid messing up the sorted order of the queue.
    for (int i = 0; i < nonZeroSizedResources.size(); i++) {
        this->purgeResource(nonZeroSizedResources[i]);
    }

    // Since we called processReturnedResources at the start of this call, we could still end up
    // over budget even after purging resources based on purgeTime. So we call purgeAsNeeded at the
    // end here.
    this->purgeAsNeeded();
}
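
// Returns the next timestamp, wrapping one value early (at kMaxTimestamp) so that kMaxTimestamp
// stays reserved for zero-sized resources. On wrap, all live resources are renumbered
// sequentially from zero in their current relative order.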
uint32_t ResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap. We wrap one value early when we reach kMaxTimestamp so that we
    // can continue to use kMaxTimestamp as a special case for zero-sized resources.
    if (fTimestamp == kMaxTimestamp) {
        fTimestamp = 0;
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<Resource*> sortedPurgeableResources;
            sortedPurgeableResources.reserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.size() &&
                   currNP < fNonpurgeableResources.size()) {
                uint32_t tsP = sortedPurgeableResources[currP]->timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    this->setResourceTimestamp(sortedPurgeableResources[currP++], fTimestamp++);
                } else {
                    // Correct the index in the non-purgeable array stored on the resource
                    // post-sort.
                    *fNonpurgeableResources[currNP]->accessCacheIndex() = currNP;
                    this->setResourceTimestamp(fNonpurgeableResources[currNP++], fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.size()) {
                this->setResourceTimestamp(sortedPurgeableResources[currP++], fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.size()) {
                *fNonpurgeableResources[currNP]->accessCacheIndex() = currNP;
                this->setResourceTimestamp(fNonpurgeableResources[currNP++], fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.size(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}

void ResourceCache::setResourceTimestamp(Resource* resource, uint32_t timestamp) {
    // We always set the timestamp for zero-sized resources to be kMaxTimestamp.
    if (resource->gpuMemorySize() == 0) {
        timestamp = kMaxTimestamp;
    }
    resource->setTimestamp(timestamp);
}

void ResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}

////////////////////////////////////////////////////////////////////////////////

const GraphiteResourceKey& ResourceCache::MapTraits::GetKey(const Resource& r) {
    return r.key();
}

uint32_t ResourceCache::MapTraits::Hash(const GraphiteResourceKey& key) {
    return key.hash();
}

bool ResourceCache::CompareTimestamp(Resource* const& a, Resource* const& b) {
    return a->timestamp() < b->timestamp();
}

int* ResourceCache::AccessResourceIndex(Resource* const& res) {
    return res->accessCacheIndex();
}

#ifdef SK_DEBUG
void ResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        int fShareable;
        int fScratch;
        size_t fBudgetedBytes;
        const ResourceMap* fResourceMap;

        Stats(const ResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fResourceMap = &cache->fResourceMap;
        }

        void update(Resource* resource) {
            const GraphiteResourceKey& key = resource->key();
            SkASSERT(key.isValid());

            // We should always have at least 1 cache ref.
            SkASSERT(resource->hasCacheRef());

            // All resources in the cache are owned. If we track wrapped resources in the cache
            // we'll need to update this check.
            SkASSERT(resource->ownership() == Ownership::kOwned);

            // We track scratch (non-shareable, no usage refs, has been returned to cache) and
            // shareable resources here, as those should be the only things in the fResourceMap. A
            // non-shareable resource that does not meet the scratch criteria will not be given
            // back out in response to cache requests. After processing all the resources we assert
            // that fScratch + fShareable equals the count in the fResourceMap.
            if (resource->isUsableAsScratch()) {
                SkASSERT(key.shareable() == Shareable::kNo);
                SkASSERT(!resource->hasUsageRef());
                ++fScratch;
                SkASSERT(fResourceMap->has(resource, key));
                SkASSERT(resource->budgeted() == skgpu::Budgeted::kYes);
            } else if (key.shareable() == Shareable::kNo) {
                SkASSERT(!fResourceMap->has(resource, key));
            } else {
                SkASSERT(key.shareable() == Shareable::kYes);
                ++fShareable;
                SkASSERT(fResourceMap->has(resource, key));
                SkASSERT(resource->budgeted() == skgpu::Budgeted::kYes);
            }

            if (resource->budgeted() == skgpu::Budgeted::kYes) {
                fBudgetedBytes += resource->gpuMemorySize();
            }

            if (resource->gpuMemorySize() == 0) {
                SkASSERT(resource->timestamp() == kMaxTimestamp);
            } else {
                SkASSERT(resource->timestamp() < kMaxTimestamp);
            }
        }
    };

    {
        int count = 0;
        fResourceMap.foreach([&](const Resource& resource) {
            SkASSERT(resource.isUsableAsScratch() ||
                     resource.key().shareable() == Shareable::kYes);
            SkASSERT(resource.budgeted() == skgpu::Budgeted::kYes);
            count++;
        });
        SkASSERT(count == fResourceMap.count());
    }

    // In the below checks we can assert that anything in the purgeable queue is purgeable because
    // we won't put a Resource into that queue unless all refs are zero. Thus there is no way for
    // that resource to be made non-purgeable without going through the cache (which will switch
    // queues back to non-purgeable).
    //
    // However, we can't say the same for things in the non-purgeable array. It is possible that
    // Resources have removed all their refs (thus technically become purgeable) but have not been
    // processed back into the cache yet. Thus we may not have moved resources to the purgeable
    // queue yet. It's also possible that a Resource hasn't been added to the ReturnQueue yet (a
    // thread paused between the unref and adding to the ReturnQueue), so we can't even make
    // asserts like not purgeable or is in the ReturnQueue.
    Stats stats(this);
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        SkASSERT(*fNonpurgeableResources[i]->accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        SkASSERT(!this->inPurgeableQueue(fNonpurgeableResources[i]));
        stats.update(fNonpurgeableResources[i]);
    }
    bool firstPurgeableIsSizeZero = false;
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        if (i == 0) {
            firstPurgeableIsSizeZero = (fPurgeableQueue.at(0)->gpuMemorySize() == 0);
        }
        if (firstPurgeableIsSizeZero) {
            // If the first purgeable item (i.e. least recently used) is sized zero, then all other
            // purgeable resources must also be sized zero since they should all have a timestamp of
            // kMaxTimestamp.
            SkASSERT(fPurgeableQueue.at(i)->gpuMemorySize() == 0);
        }
        SkASSERT(fPurgeableQueue.at(i)->isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
    }

    SkASSERT((stats.fScratch + stats.fShareable) == fResourceMap.count());
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
}

bool ResourceCache::isInCache(const Resource* resource) const {
    int index = *resource->accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.size() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#endif // SK_DEBUG

#if defined(GRAPHITE_TEST_UTILS)

int ResourceCache::numFindableResources() const {
    return fResourceMap.count();
}

void ResourceCache::setMaxBudget(size_t bytes) {
    fMaxBytes = bytes;
    this->processReturnedResources();
    this->purgeAsNeeded();
}

Resource* ResourceCache::topOfPurgeableQueue() {
    if (!fPurgeableQueue.count()) {
        return nullptr;
    }
    return fPurgeableQueue.peek();
}

void ResourceCache::visitTextures(
        const std::function<void(const Texture*, bool purgeable)>& func) const {
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        if (const Texture* tex = fNonpurgeableResources[i]->asTexture()) {
            func(tex, /* purgeable= */ false);
        }
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        if (const Texture* tex = fPurgeableQueue.at(i)->asTexture()) {
            func(tex, /* purgeable= */ true);
        }
    }
}

#endif // defined(GRAPHITE_TEST_UTILS)

} // namespace skgpu::graphite
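
// A minimal usage sketch (not part of the source above; `owner`, `recorderID`, `key`, and
// `resource` are assumed to be provided by the surrounding Graphite setup):
//
//     skgpu::SingleOwner owner;
//     sk_sp<skgpu::graphite::ResourceCache> cache =
//             skgpu::graphite::ResourceCache::Make(&owner, recorderID, /*maxBytes=*/256 << 20);
//
//     cache->insertResource(resource);  // the cache refs the resource and adds it to the budget
//     skgpu::graphite::Resource* found =
//             cache->findAndRefResource(key, skgpu::Budgeted::kYes);  // usage ref + MRU on a hit
//
//     cache->shutdown();  // must happen before the cache is destroyed (see ~ResourceCache)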