GrThreadSafeCache::GrThreadSafeCache()
        : fFreeEntryList(nullptr) {
}

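// Overview of the structures used below: entries are tracked both in a hash map
// (fUniquelyKeyedEntryMap, for O(1) lookup by skgpu::UniqueKey) and in an intrusive
// doubly-linked list (fUniquelyKeyedEntryList) kept in MRU order at the head, so the
// drop* methods can evict from the LRU tail. Every public entry point takes
// fSpinLock, and retired entries are kept on fFreeEntryList for reuse.
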
#if defined(GR_TEST_UTILS)
int GrThreadSafeCache::numEntries() const {
    SkAutoSpinlock lock{fSpinLock};

    return fUniquelyKeyedEntryMap.count();
}

size_t GrThreadSafeCache::approxBytesUsedForHash() const {
    SkAutoSpinlock lock{fSpinLock};

    return fUniquelyKeyedEntryMap.approxBytesUsed();
}
#endif

void GrThreadSafeCache::dropAllRefs() {
    SkAutoSpinlock lock{fSpinLock};

    fUniquelyKeyedEntryMap.reset();
    while (auto tmp = fUniquelyKeyedEntryList.head()) {
        fUniquelyKeyedEntryList.remove(tmp);
        this->recycleEntry(tmp);
    }
}

void GrThreadSafeCache::dropUniqueRefs(GrResourceCache* resourceCache) {
    SkAutoSpinlock lock{fSpinLock};

    // Iterate from the LRU (tail) end toward the MRU (head) end.
    Entry* cur = fUniquelyKeyedEntryList.tail();
    Entry* prev = cur ? cur->fPrev : nullptr;

    while (cur) {
        if (resourceCache && !resourceCache->overBudget()) {
            // The resource cache is back under budget; nothing more needs to be purged.
            return;
        }

        if (cur->uniquelyHeld()) {
            fUniquelyKeyedEntryMap.remove(cur->key());
            fUniquelyKeyedEntryList.remove(cur);
            this->recycleEntry(cur);
        }

        cur = prev;
        prev = cur ? cur->fPrev : nullptr;
    }
}

void GrThreadSafeCache::dropUniqueRefsOlderThan(skgpu::StdSteadyClock::time_point purgeTime) {
    SkAutoSpinlock lock{fSpinLock};

    // Iterate from the LRU (tail) end toward the MRU (head) end.
    Entry* cur = fUniquelyKeyedEntryList.tail();
    Entry* prev = cur ? cur->fPrev : nullptr;

    while (cur) {
        if (cur->fLastAccess >= purgeTime) {
            // This entry, and every entry closer to the head, was accessed after 'purgeTime'.
            return;
        }

        if (cur->uniquelyHeld()) {
            fUniquelyKeyedEntryMap.remove(cur->key());
            fUniquelyKeyedEntryList.remove(cur);
            this->recycleEntry(cur);
        }

        cur = prev;
        prev = cur ? cur->fPrev : nullptr;
    }
}

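// Example (hypothetical caller, not part of this file): purge entries that have not
// been touched within the last five seconds. 'cache' is an assumed
// GrThreadSafeCache* and the five-second window is arbitrary.
//
//     auto purgeTime = skgpu::StdSteadyClock::now() - std::chrono::seconds(5);
//     cache->dropUniqueRefsOlderThan(purgeTime);
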
void GrThreadSafeCache::makeExistingEntryMRU(Entry* entry) {
    SkASSERT(fUniquelyKeyedEntryList.isInList(entry));

    entry->fLastAccess = skgpu::StdSteadyClock::now();
    fUniquelyKeyedEntryList.remove(entry);
    fUniquelyKeyedEntryList.addToHead(entry);
}

std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalFind(
        const skgpu::UniqueKey& key) {
    Entry* tmp = fUniquelyKeyedEntryMap.find(key);
    if (tmp) {
        this->makeExistingEntryMRU(tmp);
        return { tmp->view(), tmp->refCustomData() };
    }

    return {};
}

#ifdef SK_DEBUG
bool GrThreadSafeCache::has(const skgpu::UniqueKey& key) {
    SkAutoSpinlock lock{fSpinLock};

    Entry* tmp = fUniquelyKeyedEntryMap.find(key);
    return SkToBool(tmp);
}
#endif

GrSurfaceProxyView GrThreadSafeCache::find(const skgpu::UniqueKey& key) {
    SkAutoSpinlock lock{fSpinLock};

    GrSurfaceProxyView view;
    std::tie(view, std::ignore) = this->internalFind(key);
    return view;
}

std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::findWithData(
        const skgpu::UniqueKey& key) {
    SkAutoSpinlock lock{fSpinLock};

    return this->internalFind(key);
}

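// Example (hypothetical caller, not part of this file): look up a cached view along
// with the custom data stored beside it. 'cache' and 'key' are assumed to exist.
//
//     auto [view, data] = cache->findWithData(key);
//     if (view) {
//         // hit: 'data' may still be null if no custom data was attached
//     } else {
//         // miss: create the view and publish it with addWithData()
//     }
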
GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const skgpu::UniqueKey& key,
                                                      const GrSurfaceProxyView& view) {
    Entry* entry;

    if (fFreeEntryList) {
        // Reuse a previously recycled entry.
        entry = fFreeEntryList;
        fFreeEntryList = entry->fNext;
        entry->fNext = nullptr;

        entry->set(key, view);
    } else {
        entry = fEntryAllocator.make<Entry>(key, view);
    }

    return this->makeNewEntryMRU(entry);
}

GrThreadSafeCache::Entry* GrThreadSafeCache::makeNewEntryMRU(Entry* entry) {
    entry->fLastAccess = skgpu::StdSteadyClock::now();
    fUniquelyKeyedEntryList.addToHead(entry);
    fUniquelyKeyedEntryMap.add(entry);
    return entry;
}

GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const skgpu::UniqueKey& key,
                                                      sk_sp<VertexData> vertData) {
    Entry* entry;

    if (fFreeEntryList) {
        // Reuse a previously recycled entry.
        entry = fFreeEntryList;
        fFreeEntryList = entry->fNext;
        entry->fNext = nullptr;

        entry->set(key, std::move(vertData));
    } else {
        entry = fEntryAllocator.make<Entry>(key, std::move(vertData));
    }

    return this->makeNewEntryMRU(entry);
}

void GrThreadSafeCache::recycleEntry(Entry* dead) {
    SkASSERT(!dead->fPrev && !dead->fNext && !dead->fList);

    dead->fNext = fFreeEntryList;
    fFreeEntryList = dead;
}

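// Design note: entries appear to be allocated from fEntryAllocator (an arena-style
// allocator, judging by the make<Entry>() calls above), which cannot free individual
// objects. recycleEntry() therefore threads retired entries onto the singly-linked
// fFreeEntryList so getEntry() can reuse them instead of allocating fresh ones.
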
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalAdd(
        const skgpu::UniqueKey& key,
        const GrSurfaceProxyView& view) {
    Entry* tmp = fUniquelyKeyedEntryMap.find(key);
    if (!tmp) {
        tmp = this->getEntry(key, view);
    }

    // If an entry already existed for 'key', the passed-in view is dropped and the
    // incumbent entry's view and custom data are returned instead.
    return { tmp->view(), tmp->refCustomData() };
}

GrSurfaceProxyView GrThreadSafeCache::add(const skgpu::UniqueKey& key,
                                          const GrSurfaceProxyView& view) {
    SkAutoSpinlock lock{fSpinLock};

    GrSurfaceProxyView newView;
    std::tie(newView, std::ignore) = this->internalAdd(key, view);
    return newView;
}

std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::addWithData(
        const skgpu::UniqueKey& key,
        const GrSurfaceProxyView& view) {
    SkAutoSpinlock lock{fSpinLock};

    return this->internalAdd(key, view);
}

GrSurfaceProxyView GrThreadSafeCache::findOrAdd(const skgpu::UniqueKey& key,
                                                const GrSurfaceProxyView& v) {
    SkAutoSpinlock lock{fSpinLock};

    GrSurfaceProxyView view;
    std::tie(view, std::ignore) = this->internalFind(key);
    if (view) {
        return view;
    }

    std::tie(view, std::ignore) = this->internalAdd(key, v);
    return view;
}

std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::findOrAddWithData(
        const skgpu::UniqueKey& key,
        const GrSurfaceProxyView& v) {
    SkAutoSpinlock lock{fSpinLock};

    auto [view, data] = this->internalFind(key);
    if (view) {
        return { std::move(view), std::move(data) };
    }

    return this->internalAdd(key, v);
}

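// Example (hypothetical caller, not part of this file): the find-or-add pattern in a
// single call. If two threads race on the same key, the first entry to land in the
// cache wins and both callers receive that entry's view and custom data.
//
//     auto [view, data] = cache->findOrAddWithData(key, locallyCreatedView);
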
std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
GrThreadSafeCache::internalFindVerts(const skgpu::UniqueKey& key) {
    Entry* tmp = fUniquelyKeyedEntryMap.find(key);
    if (tmp) {
        this->makeExistingEntryMRU(tmp);
        return { tmp->vertexData(), tmp->refCustomData() };
    }

    return {};
}

std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
GrThreadSafeCache::findVertsWithData(const skgpu::UniqueKey& key) {
    SkAutoSpinlock lock{fSpinLock};

    return this->internalFindVerts(key);
}

std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
GrThreadSafeCache::internalAddVerts(const skgpu::UniqueKey& key,
                                    sk_sp<VertexData> vertData,
                                    IsNewerBetter isNewerBetter) {
    Entry* tmp = fUniquelyKeyedEntryMap.find(key);
    if (!tmp) {
        tmp = this->getEntry(key, std::move(vertData));
    } else if (isNewerBetter(tmp->getCustomData(), key.getCustomData())) {
        // The incoming data is preferred: replace the incumbent entry's contents in place.
        tmp->set(key, std::move(vertData));
    }

    return { tmp->vertexData(), tmp->refCustomData() };
}

std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
GrThreadSafeCache::addVertsWithData(const skgpu::UniqueKey& key,
                                    sk_sp<VertexData> vertData,
                                    IsNewerBetter isNewerBetter) {
    SkAutoSpinlock lock{fSpinLock};

    return this->internalAddVerts(key, std::move(vertData), isNewerBetter);
}

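// Example (hypothetical, not part of this file): a replacement policy for
// addVertsWithData(). It assumes IsNewerBetter is callable with the incumbent
// entry's custom SkData and the incoming key's custom SkData, and that the payload
// starts with a float "tolerance"; both assumptions are for illustration only.
//
//     GrThreadSafeCache::IsNewerBetter preferLowerTolerance =
//             [](SkData* incumbent, SkData* challenger) {
//                 float oldTol = *static_cast<const float*>(incumbent->data());
//                 float newTol = *static_cast<const float*>(challenger->data());
//                 return newTol < oldTol;  // only replace if the new data is better
//             };
//
//     auto [verts, data] = cache->addVertsWithData(key, std::move(vertexData),
//                                                  preferLowerTolerance);
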
void GrThreadSafeCache::remove(const skgpu::UniqueKey& key) {
    SkAutoSpinlock lock{fSpinLock};

    Entry* tmp = fUniquelyKeyedEntryMap.find(key);
    if (tmp) {
        fUniquelyKeyedEntryMap.remove(key);
        fUniquelyKeyedEntryList.remove(tmp);
        this->recycleEntry(tmp);
    }
}

std::tuple<GrSurfaceProxyView, sk_sp<GrThreadSafeCache::Trampoline>>
GrThreadSafeCache::CreateLazyView(GrDirectContext* dContext,
                                  GrColorType origCT,
                                  SkISize dimensions,
                                  GrSurfaceOrigin origin,
                                  SkBackingFit fit) {
    constexpr int kSampleCnt = 1;
    auto [newCT, format] =
            dContext->priv().caps()->getFallbackColorTypeAndFormat(origCT, kSampleCnt);

    if (newCT == GrColorType::kUnknown) {
        return {};
    }

    sk_sp<Trampoline> trampoline(new Trampoline);

    sk_sp<GrRenderTargetProxy> proxy = dContext->priv().proxyProvider()->createLazyRenderTargetProxy(
            [trampoline](GrResourceProvider* resourceProvider,
                         const GrSurfaceProxy::LazySurfaceDesc&)
                    -> GrSurfaceProxy::LazyCallbackResult {
                // Instantiation can only succeed once 'trampoline->fProxy' has been
                // filled in with an instantiated proxy.
                if (!resourceProvider || !trampoline->fProxy ||
                    !trampoline->fProxy->isInstantiated()) {
                    return GrSurfaceProxy::LazyCallbackResult(nullptr, true);
                }

                SkASSERT(!trampoline->fProxy->peekTexture()->getUniqueKey().isValid());

                return GrSurfaceProxy::LazyCallbackResult(
                        sk_ref_sp(trampoline->fProxy->peekTexture()));
            },
            format,
            dimensions,
            kSampleCnt
            /* the remaining arguments (surface flags, mipmap status, 'fit', budgeting)
               are missing from this excerpt */);

    skgpu::Swizzle swizzle = dContext->priv().caps()->getReadSwizzle(format, newCT);

    return {{std::move(proxy), origin, swizzle}, std::move(trampoline)};
}

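// Note on the trampoline: CreateLazyView() returns a proxy whose instantiation
// callback reads trampoline->fProxy. The apparent contract is that whichever thread
// ends up rendering the shared resource later stores an instantiated texture proxy
// in the trampoline; until that happens the callback fails the instantiation with
// LazyCallbackResult(nullptr, true).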