36 #define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)
43 ~AutoValidate() { fCache->validate(); }
53 : fInvalidUniqueKeyInbox(familyID)
54 , fUnrefResourceInbox(owningContextID)
55 , fOwningContextID(owningContextID)
56 , fContextUniqueID(familyID)
57 , fSingleOwner(singleOwner) {
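// GrResourceCache::insertResource(): a new resource is always non-purgeable, so it is stamped
// with a fresh timestamp, appended to fNonpurgeableResources, and the byte/count totals
// (including the high-water and budgeted high-water marks) are updated.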
80 resource->cacheAccess().setTimestamp(this->getNextTimestamp());
82 this->addToNonpurgeableArray(resource);
89 fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
93 fBudgetedBytes += size;
95 fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
97 fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
98 fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
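// removeResource() reverses that bookkeeping: purgeable entries leave the purgeable queue,
// non-purgeable ones leave the array, budgeted counters shrink, and the resource is dropped
// from the scratch map and unique-key hash when it is registered there.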
111 if (resource->resourcePriv().isPurgeable()) {
113 fPurgeableBytes -= size;
115 this->removeFromNonpurgeableArray(resource);
122 fBudgetedBytes -= size;
124 fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
127 if (resource->cacheAccess().isUsableAsScratch()) {
130 if (resource->getUniqueKey().isValid()) {
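// abandonAll() and releaseAll() both drain every tracked resource, first from the
// non-purgeable array and then from the purgeable queue; releaseAll() also processes
// pending freed-resource messages before it starts.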
137 AutoValidate av(this);
139 while (!fNonpurgeableResources.empty()) {
145 while (fPurgeableQueue.count()) {
164 AutoValidate av(this);
168 this->processFreedGpuResources();
177 while (!fNonpurgeableResources.empty()) {
183 while (fPurgeableQueue.count()) {
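// Cache hits are taken through refAndMakeResourceMRU(), which refs the resource and gives it
// the newest timestamp; refResource() short-circuits when the client already holds a ref.
// removeUniqueKey() drops the key from the unique-key hash and may re-register the resource
// in the scratch map.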
202 if (resource->cacheAccess().hasRef()) {
205 this->refAndMakeResourceMRU(resource);
216 this->refAndMakeResourceMRU(resource);
225 if (resource->cacheAccess().isUsableAsScratch()) {
234 if (resource->getUniqueKey().isValid()) {
238 resource->cacheAccess().removeUniqueKey();
239 if (resource->cacheAccess().isUsableAsScratch()) {
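// changeUniqueKey(): an existing holder of the new key is released if it is purgeable and has
// no scratch key; the resource's old unique key (if any) is removed before the new key is
// installed, and scratch-map membership is refreshed.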
259 if (!old->resourcePriv().getScratchKey().isValid() &&
260 old->resourcePriv().isPurgeable()) {
261 old->cacheAccess().release();
270 if (resource->getUniqueKey().isValid()) {
278 if (resource->cacheAccess().isUsableAsScratch()) {
283 resource->cacheAccess().setUniqueKey(newKey);
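// refAndMakeResourceMRU() pulls a purgeable resource back into the non-purgeable array and
// restamps it; notifyARefCntReachedZero() handles the opposite transition when the last
// client ref goes away.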
297 if (resource->resourcePriv().isPurgeable()) {
299 fPurgeableBytes -= resource->gpuMemorySize();
301 this->addToNonpurgeableArray(resource);
302 } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
304 SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
305 fNumBudgetedResourcesFlushWillMakePurgeable--;
309 resource->cacheAccess().setTimestamp(this->getNextTimestamp());
323 if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
324 if (resource->cacheAccess().isUsableAsScratch()) {
329 if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
338 if (resource->resourcePriv().isPurgeable()) {
339 fNewlyPurgeableResourceForValidation = resource;
342 resource->cacheAccess().setTimestamp(this->getNextTimestamp());
343 SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);
345 if (!resource->resourcePriv().isPurgeable() &&
347 ++fNumBudgetedResourcesFlushWillMakePurgeable;
350 if (!resource->resourcePriv().isPurgeable()) {
355 this->removeFromNonpurgeableArray(resource);
357 resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
358 fPurgeableBytes += resource->gpuMemorySize();
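// A resource that becomes purgeable while unbudgeted is still worth keeping if it has a
// scratch key, does not wrap an external object, and would fit in the budget: it is made
// budgeted so it can be recycled later.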
378 if (!resource->resourcePriv().refsWrappedObjects() &&
379 resource->resourcePriv().getScratchKey().isValid()) {
381 if (this->wouldFit(resource->gpuMemorySize())) {
382 resource->resourcePriv().makeBudgeted();
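// didChangeBudgetStatus(): moving a resource into the budget bumps fBudgetedCount and
// fBudgetedBytes (and their high-water marks) and may add it to the scratch map; moving it
// out reverses that accounting.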
408 fBudgetedBytes += size;
410 fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
411 fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
413 if (!resource->resourcePriv().isPurgeable() &&
414 !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
415 ++fNumBudgetedResourcesFlushWillMakePurgeable;
417 if (resource->cacheAccess().isUsableAsScratch()) {
424 fBudgetedBytes -= size;
425 if (!resource->resourcePriv().isPurgeable() &&
426 !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
427 --fNumBudgetedResourcesFlushWillMakePurgeable;
429 if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
430 resource->resourcePriv().getScratchKey().isValid()) {
436 fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
443 fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
444 if (!invalidKeyMsgs.empty()) {
447 for (int i = 0; i < invalidKeyMsgs.size(); ++i) {
448 if (invalidKeyMsgs[i].inThreadSafeCache()) {
449 fThreadSafeCache->remove(invalidKeyMsgs[i].key());
450 SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key()));
453 invalidKeyMsgs[i].key(), nullptr,
460 this->processFreedGpuResources();
463 while (stillOverbudget && fPurgeableQueue.count()) {
470 if (stillOverbudget) {
474 while (stillOverbudget && fPurgeableQueue.count()) {
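// purgeUnlockedResources(): either release the purgeable queue in LRU order, stopping at the
// first resource that became purgeable after the optional cutoff time, or (for
// kScratchResourcesOnly) collect and release only the entries without a unique key.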
496 while (fPurgeableQueue.count()) {
499 const skgpu::StdSteadyClock::time_point resourceTime =
500 resource->cacheAccess().timeWhenResourceBecamePurgeable();
501 if (purgeTime && resourceTime >= *purgeTime) {
517 if (purgeTime && fPurgeableQueue.count() &&
518 fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
523 fPurgeableQueue.sort();
527 for (int i = 0; i < fPurgeableQueue.count(); i++) {
530 const skgpu::StdSteadyClock::time_point resourceTime =
531 resource->cacheAccess().timeWhenResourceBecamePurgeable();
532 if (purgeTime && resourceTime >= *purgeTime) {
537 if (!resource->getUniqueKey().isValid()) {
544 for (int i = 0; i < scratchResources.size(); i++) {
545 scratchResources[i]->cacheAccess().release();
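// purgeToMakeHeadroom(): after sorting the purgeable queue, walk it in LRU order and compute
// how far the budget would drop; once enough headroom is projected, copy that prefix into a
// temporary list and release it (copying first keeps the queue stable during removal).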
553 AutoValidate av(this);
554 if (desiredHeadroomBytes > fMaxBytes) {
557 if (this->wouldFit(desiredHeadroomBytes)) {
560 fPurgeableQueue.sort();
562 size_t projectedBudget = fBudgetedBytes;
564 for (int i = 0; i < fPurgeableQueue.count(); i++) {
567 projectedBudget -= resource->gpuMemorySize();
569 if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
580 std::vector<GrGpuResource*> resources;
581 resources.reserve(purgeCnt);
582 for (int i = 0; i < purgeCnt; i++) {
583 resources.push_back(fPurgeableQueue.at(i));
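// purgeUnlockedResources(bytesToPurge, preferScratchResources): scratch-only entries are
// released first when preferred; any remaining deficit is handled by temporarily lowering
// fMaxBytes and letting purgeAsNeeded() do the work before the real budget is restored.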
593 const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge);
594 bool stillOverbudget = tmpByteBudget < fBytes;
596 if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
598 fPurgeableQueue.sort();
602 size_t scratchByteCount = 0;
603 for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
606 if (!resource->getUniqueKey().isValid()) {
608 scratchByteCount += resource->gpuMemorySize();
609 stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
615 for (int i = 0; i < scratchResources.size(); i++) {
616 scratchResources[i]->cacheAccess().release();
618 stillOverbudget = tmpByteBudget < fBytes;
624 if (stillOverbudget) {
625 const size_t cachedByteCount = fMaxBytes;
626 fMaxBytes = tmpByteBudget;
628 fMaxBytes = cachedByteCount;
634 fNumBudgetedResourcesFlushWillMakePurgeable > 0;
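// processFreedGpuResources() just drains the unref-message inbox; the messages release their
// resources as they are destroyed. The non-purgeable array is unordered, so removal swaps the
// tail element into the vacated slot and fixes up that resource's cached index.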
637 void GrResourceCache::processFreedGpuResources() {
639 fUnrefResourceInbox.poll(&msgs);
644 int index = fNonpurgeableResources.size();
646 *resource->cacheAccess().accessCacheIndex() = index;
650 int* index = resource->cacheAccess().accessCacheIndex();
655 fNonpurgeableResources[*index] = tail;
656 *tail->cacheAccess().accessCacheIndex() = *index;
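// getNextTimestamp(): timestamps are 32-bit and strictly increasing. When the counter wraps to
// zero, every resource is re-stamped: the purgeable queue is emptied into a sorted array,
// merged with the timestamp-sorted non-purgeable array, and compacted timestamps are assigned
// in relative-age order before the queue is rebuilt.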
661 uint32_t GrResourceCache::getNextTimestamp() {
664 if (0 == fTimestamp) {
671 sortedPurgeableResources.reserve(fPurgeableQueue.count());
673 while (fPurgeableQueue.count()) {
674 *sortedPurgeableResources.append() = fPurgeableQueue.peek();
675 fPurgeableQueue.pop();
685 while (currP < sortedPurgeableResources.size() &&
686 currNP < fNonpurgeableResources.size()) {
687 uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
688 uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
691 sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
694 *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
695 fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
700 while (currP < sortedPurgeableResources.size()) {
701 sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
703 while (currNP < fNonpurgeableResources.size()) {
704 *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
705 fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
709 for (int i = 0; i < sortedPurgeableResources.size(); ++i) {
710 fPurgeableQueue.insert(sortedPurgeableResources[i]);
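// dumpMemoryStatistics() and getStats() simply visit both resource sets; dumpStats()
// (GR_TEST_UTILS only) formats the counters, including budget utilization, into a readable
// summary.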
724 for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
725 fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
727 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
733 void GrResourceCache::getStats(Stats* stats) const {
737 stats->fNumNonPurgeable = fNonpurgeableResources.size();
738 stats->fNumPurgeable = fPurgeableQueue.count();
740 for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
741 stats->update(fNonpurgeableResources[i]);
743 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
744 stats->update(fPurgeableQueue.at(i));
748 #if defined(GR_TEST_UTILS)
749 void GrResourceCache::dumpStats(SkString* out) const {
754 this->getStats(&stats);
756 float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;
758 out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
759 out->appendf("\t\tEntry Count: current %d"
760 " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
762 stats.fScratch, fHighWaterCount);
763 out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
773 this->getStats(&stats);
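// validate() (debug only): a local Stats helper re-derives the byte/count totals, scratch-map
// and unique-hash membership, and the flush-will-make-purgeable count from scratch, then
// asserts they match the incrementally maintained fields. The random mask throttles how often
// the full check runs for large caches.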
781 void GrResourceCache::validate() const {
785 if (~mask && (gRandom.nextU() & mask)) {
792 size_t fBudgetedBytes;
797 const ScratchMap* fScratchMap;
798 const UniqueHash* fUniqueHash;
801 memset(this, 0, sizeof(*this));
802 fScratchMap = &cache->fScratchMap;
803 fUniqueHash = &cache->fUniqueHash;
807 fBytes += resource->gpuMemorySize();
809 if (!resource->resourcePriv().isPurgeable()) {
816 if (resource->cacheAccess().isUsableAsScratch()) {
821 SkASSERT(fScratchMap->countForKey(scratchKey));
823 } else if (scratchKey.isValid()) {
833 resource->resourcePriv().refsWrappedObjects());
838 fBudgetedBytes += resource->gpuMemorySize();
853 size_t purgeableBytes = 0;
854 int numBudgetedResourcesFlushWillMakePurgeable = 0;
856 for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
857 SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
858 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
859 SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
860 SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
862 !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
863 fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
864 ++numBudgetedResourcesFlushWillMakePurgeable;
866 stats.update(fNonpurgeableResources[i]);
868 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
872 stats.update(fPurgeableQueue.at(i));
880 SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
881 numBudgetedResourcesFlushWillMakePurgeable);
884 SkASSERT(purgeableBytes == fPurgeableBytes);
886 SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
887 SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
888 SkASSERT(fBytes <= fHighWaterBytes);
890 SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
891 SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
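// isInCache(): a resource's cached index must point back at it in either the purgeable queue
// or the non-purgeable array; anything else trips the SkDEBUGFAIL below.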
903 int index = *resource->cacheAccess().accessCacheIndex();
907 if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
910 if (index < fNonpurgeableResources.size() && fNonpurgeableResources[index] == resource) {
913 SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
919 #if defined(GR_TEST_UTILS)
921 int GrResourceCache::countUniqueKeysWithTag(const char* tag) const {
924 if (0 == strcmp(tag, resource.getUniqueKey().tag())) {
931 void GrResourceCache::changeTimestamp(uint32_t newTimestamp) {
932 fTimestamp = newTimestamp;
935 void GrResourceCache::visitSurfaces(
938 for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
939 if (const GrSurface* surf = fNonpurgeableResources[i]->asSurface()) {
943 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
944 if (const GrSurface* surf = fPurgeableQueue.at(i)->asSurface()) {