Flutter Engine
The Flutter Engine
GrThreadSafeCache.cpp
Go to the documentation of this file.
1/*
2 * Copyright 2020 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
9
19
21 this->reset();
22}
23
25 : fFreeEntryList(nullptr) {
26}
27
29 this->dropAllRefs();
30}
31
32#if defined(GR_TEST_UTILS)
33int GrThreadSafeCache::numEntries() const {
34 SkAutoSpinlock lock{fSpinLock};
35
36 return fUniquelyKeyedEntryMap.count();
37}
38
39size_t GrThreadSafeCache::approxBytesUsedForHash() const {
40 SkAutoSpinlock lock{fSpinLock};
41
42 return fUniquelyKeyedEntryMap.approxBytesUsed();
43}
44#endif
45
47 SkAutoSpinlock lock{fSpinLock};
48
49 fUniquelyKeyedEntryMap.reset();
50 while (auto tmp = fUniquelyKeyedEntryList.head()) {
51 fUniquelyKeyedEntryList.remove(tmp);
52 this->recycleEntry(tmp);
53 }
54 // TODO: should we empty out the fFreeEntryList and reset fEntryAllocator?
55}
56
57// TODO: If iterating becomes too expensive switch to using something like GrIORef for the
58// GrSurfaceProxy
60 SkAutoSpinlock lock{fSpinLock};
61
62 // Iterate from LRU to MRU
63 Entry* cur = fUniquelyKeyedEntryList.tail();
64 Entry* prev = cur ? cur->fPrev : nullptr;
65
66 while (cur) {
67 if (resourceCache && !resourceCache->overBudget()) {
68 return;
69 }
70
71 if (cur->uniquelyHeld()) {
72 fUniquelyKeyedEntryMap.remove(cur->key());
73 fUniquelyKeyedEntryList.remove(cur);
74 this->recycleEntry(cur);
75 }
76
77 cur = prev;
78 prev = cur ? cur->fPrev : nullptr;
79 }
80}
81
82void GrThreadSafeCache::dropUniqueRefsOlderThan(skgpu::StdSteadyClock::time_point purgeTime) {
83 SkAutoSpinlock lock{fSpinLock};
84
85 // Iterate from LRU to MRU
86 Entry* cur = fUniquelyKeyedEntryList.tail();
87 Entry* prev = cur ? cur->fPrev : nullptr;
88
89 while (cur) {
90 if (cur->fLastAccess >= purgeTime) {
91 // This entry and all the remaining ones in the list will be newer than 'purgeTime'
92 return;
93 }
94
95 if (cur->uniquelyHeld()) {
96 fUniquelyKeyedEntryMap.remove(cur->key());
97 fUniquelyKeyedEntryList.remove(cur);
98 this->recycleEntry(cur);
99 }
100
101 cur = prev;
102 prev = cur ? cur->fPrev : nullptr;
103 }
104}
105
106void GrThreadSafeCache::makeExistingEntryMRU(Entry* entry) {
107 SkASSERT(fUniquelyKeyedEntryList.isInList(entry));
108
109 entry->fLastAccess = skgpu::StdSteadyClock::now();
110 fUniquelyKeyedEntryList.remove(entry);
111 fUniquelyKeyedEntryList.addToHead(entry);
112}
113
114std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalFind(
115 const skgpu::UniqueKey& key) {
116 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
117 if (tmp) {
118 this->makeExistingEntryMRU(tmp);
119 return { tmp->view(), tmp->refCustomData() };
120 }
121
122 return {};
123}
124
#ifdef SK_DEBUG
bool GrThreadSafeCache::has(const skgpu::UniqueKey& key) {
    // Debug-only containment test; unlike find(), this does not bump the
    // entry to MRU.
    SkAutoSpinlock lock{fSpinLock};

    return SkToBool(fUniquelyKeyedEntryMap.find(key));
}
#endif
133
135 SkAutoSpinlock lock{fSpinLock};
136
138 std::tie(view, std::ignore) = this->internalFind(key);
139 return view;
140}
141
142std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::findWithData(
143 const skgpu::UniqueKey& key) {
144 SkAutoSpinlock lock{fSpinLock};
145
146 return this->internalFind(key);
147}
148
149GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const skgpu::UniqueKey& key,
150 const GrSurfaceProxyView& view) {
151 Entry* entry;
152
153 if (fFreeEntryList) {
154 entry = fFreeEntryList;
155 fFreeEntryList = entry->fNext;
156 entry->fNext = nullptr;
157
158 entry->set(key, view);
159 } else {
160 entry = fEntryAllocator.make<Entry>(key, view);
161 }
162
163 return this->makeNewEntryMRU(entry);
164}
165
166GrThreadSafeCache::Entry* GrThreadSafeCache::makeNewEntryMRU(Entry* entry) {
167 entry->fLastAccess = skgpu::StdSteadyClock::now();
168 fUniquelyKeyedEntryList.addToHead(entry);
169 fUniquelyKeyedEntryMap.add(entry);
170 return entry;
171}
172
173GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const skgpu::UniqueKey& key,
174 sk_sp<VertexData> vertData) {
175 Entry* entry;
176
177 if (fFreeEntryList) {
178 entry = fFreeEntryList;
179 fFreeEntryList = entry->fNext;
180 entry->fNext = nullptr;
181
182 entry->set(key, std::move(vertData));
183 } else {
184 entry = fEntryAllocator.make<Entry>(key, std::move(vertData));
185 }
186
187 return this->makeNewEntryMRU(entry);
188}
189
190void GrThreadSafeCache::recycleEntry(Entry* dead) {
191 SkASSERT(!dead->fPrev && !dead->fNext && !dead->fList);
192
193 dead->makeEmpty();
194
195 dead->fNext = fFreeEntryList;
196 fFreeEntryList = dead;
197}
198
199std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalAdd(
200 const skgpu::UniqueKey& key,
201 const GrSurfaceProxyView& view) {
202 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
203 if (!tmp) {
204 tmp = this->getEntry(key, view);
205
206 SkASSERT(fUniquelyKeyedEntryMap.find(key));
207 }
208
209 return { tmp->view(), tmp->refCustomData() };
210}
211
213 const GrSurfaceProxyView& view) {
214 SkAutoSpinlock lock{fSpinLock};
215
216 GrSurfaceProxyView newView;
217 std::tie(newView, std::ignore) = this->internalAdd(key, view);
218 return newView;
219}
220
221std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::addWithData(
222 const skgpu::UniqueKey& key,
223 const GrSurfaceProxyView& view) {
224 SkAutoSpinlock lock{fSpinLock};
225
226 return this->internalAdd(key, view);
227}
228
230 const GrSurfaceProxyView& v) {
231 SkAutoSpinlock lock{fSpinLock};
232
234 std::tie(view, std::ignore) = this->internalFind(key);
235 if (view) {
236 return view;
237 }
238
239 std::tie(view, std::ignore) = this->internalAdd(key, v);
240 return view;
241}
242
243std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::findOrAddWithData(
244 const skgpu::UniqueKey& key,
245 const GrSurfaceProxyView& v) {
246 SkAutoSpinlock lock{fSpinLock};
247
248 auto [view, data] = this->internalFind(key);
249 if (view) {
250 return { std::move(view), std::move(data) };
251 }
252
253 return this->internalAdd(key, v);
254}
255
257 int vertexCount,
258 size_t vertexSize) {
259 return sk_sp<VertexData>(new VertexData(vertices, vertexCount, vertexSize));
260}
261
263 int vertexCount,
264 size_t vertexSize) {
265 return sk_sp<VertexData>(new VertexData(std::move(buffer), vertexCount, vertexSize));
266}
267
268std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
269 GrThreadSafeCache::internalFindVerts(const skgpu::UniqueKey& key) {
270 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
271 if (tmp) {
272 this->makeExistingEntryMRU(tmp);
273 return { tmp->vertexData(), tmp->refCustomData() };
274 }
275
276 return {};
277}
278
279std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
281 SkAutoSpinlock lock{fSpinLock};
282
283 return this->internalFindVerts(key);
284}
285
286std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCache::internalAddVerts(
287 const skgpu::UniqueKey& key,
288 sk_sp<VertexData> vertData,
289 IsNewerBetter isNewerBetter) {
290 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
291 if (!tmp) {
292 tmp = this->getEntry(key, std::move(vertData));
293
294 SkASSERT(fUniquelyKeyedEntryMap.find(key));
295 } else if (isNewerBetter(tmp->getCustomData(), key.getCustomData())) {
296 // This orphans any existing uses of the prior vertex data but ensures the best
297 // version is in the cache.
298 tmp->set(key, std::move(vertData));
299 }
300
301 return { tmp->vertexData(), tmp->refCustomData() };
302}
303
304std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCache::addVertsWithData(
305 const skgpu::UniqueKey& key,
306 sk_sp<VertexData> vertData,
307 IsNewerBetter isNewerBetter) {
308 SkAutoSpinlock lock{fSpinLock};
309
310 return this->internalAddVerts(key, std::move(vertData), isNewerBetter);
311}
312
314 SkAutoSpinlock lock{fSpinLock};
315
316 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
317 if (tmp) {
318 fUniquelyKeyedEntryMap.remove(key);
319 fUniquelyKeyedEntryList.remove(tmp);
320 this->recycleEntry(tmp);
321 }
322}
323
324std::tuple<GrSurfaceProxyView, sk_sp<GrThreadSafeCache::Trampoline>>
326 GrColorType origCT,
327 SkISize dimensions,
328 GrSurfaceOrigin origin,
329 SkBackingFit fit) {
330 GrProxyProvider* proxyProvider = dContext->priv().proxyProvider();
331 const GrCaps* caps = dContext->priv().caps();
332
333 constexpr int kSampleCnt = 1;
334 auto [newCT, format] = caps->getFallbackColorTypeAndFormat(origCT, kSampleCnt);
335
336 if (newCT == GrColorType::kUnknown) {
337 return {GrSurfaceProxyView(nullptr), nullptr};
338 }
339
340 sk_sp<Trampoline> trampoline(new Trampoline);
341
343
345 [trampoline](
346 GrResourceProvider* resourceProvider,
348 if (!resourceProvider || !trampoline->fProxy ||
349 !trampoline->fProxy->isInstantiated()) {
350 return GrSurfaceProxy::LazyCallbackResult(nullptr, true);
351 }
352
353 SkASSERT(!trampoline->fProxy->peekTexture()->getUniqueKey().isValid());
355 sk_ref_sp(trampoline->fProxy->peekTexture()));
356 },
357 format,
358 dimensions,
359 kSampleCnt,
361 &texInfo,
363 fit,
366 /* wrapsVkSecondaryCB */ false,
368
369 // TODO: It seems like this 'newCT' usage should be 'origCT' but this is
370 // what skgpu::ganesh::SurfaceDrawContext::MakeWithFallback does
371 skgpu::Swizzle swizzle = dContext->priv().caps()->getReadSwizzle(format, newCT);
372
373 return {{std::move(proxy), origin, swizzle}, std::move(trampoline)};
374}
GrColorType
Definition: GrTypesPriv.h:540
GrSurfaceOrigin
Definition: GrTypes.h:147
static float prev(float f)
#define SkASSERT(cond)
Definition: SkAssert.h:116
SkBackingFit
Definition: SkBackingFit.h:16
sk_sp< T > sk_ref_sp(T *obj)
Definition: SkRefCnt.h:381
static constexpr bool SkToBool(const T &x)
Definition: SkTo.h:35
int find(T *array, int N, T item)
const GrCaps * caps() const
Definition: GrCaps.h:57
std::tuple< GrColorType, GrBackendFormat > getFallbackColorTypeAndFormat(GrColorType, int sampleCount) const
Definition: GrCaps.cpp:499
skgpu::Swizzle getReadSwizzle(const GrBackendFormat &format, GrColorType colorType) const
Definition: GrCaps.cpp:443
GrDirectContextPriv priv()
sk_sp< GrRenderTargetProxy > createLazyRenderTargetProxy(LazyInstantiateCallback &&, const GrBackendFormat &, SkISize dimensions, int renderTargetSampleCnt, GrInternalSurfaceFlags, const TextureInfo *, GrMipmapStatus, SkBackingFit, skgpu::Budgeted, GrProtected, bool wrapsVkSecondaryCB, UseAllocator useAllocator)
GrProxyProvider * proxyProvider()
bool overBudget() const
static std::tuple< GrSurfaceProxyView, sk_sp< Trampoline > > CreateLazyView(GrDirectContext *, GrColorType, SkISize dimensions, GrSurfaceOrigin, SkBackingFit)
std::tuple< sk_sp< VertexData >, sk_sp< SkData > > addVertsWithData(const skgpu::UniqueKey &, sk_sp< VertexData >, IsNewerBetter) SK_EXCLUDES(fSpinLock)
SkDEBUGCODE(bool has(const skgpu::UniqueKey &) SK_EXCLUDES(fSpinLock);) GrSurfaceProxyView find(const skgpu std::tuple< GrSurfaceProxyView, sk_sp< SkData > > findWithData(const skgpu::UniqueKey &) SK_EXCLUDES(fSpinLock)
std::tuple< sk_sp< VertexData >, sk_sp< SkData > > findVertsWithData(const skgpu::UniqueKey &) SK_EXCLUDES(fSpinLock)
GrSurfaceProxyView add(const skgpu::UniqueKey &, const GrSurfaceProxyView &) SK_EXCLUDES(fSpinLock)
void dropUniqueRefsOlderThan(skgpu::StdSteadyClock::time_point purgeTime) SK_EXCLUDES(fSpinLock)
void remove(const skgpu::UniqueKey &) SK_EXCLUDES(fSpinLock)
std::tuple< GrSurfaceProxyView, sk_sp< SkData > > findOrAddWithData(const skgpu::UniqueKey &, const GrSurfaceProxyView &) SK_EXCLUDES(fSpinLock)
std::tuple< GrSurfaceProxyView, sk_sp< SkData > > addWithData(const skgpu::UniqueKey &, const GrSurfaceProxyView &) SK_EXCLUDES(fSpinLock)
GrSurfaceProxyView findOrAdd(const skgpu::UniqueKey &, const GrSurfaceProxyView &) SK_EXCLUDES(fSpinLock)
void dropUniqueRefs(GrResourceCache *resourceCache) SK_EXCLUDES(fSpinLock)
static sk_sp< VertexData > MakeVertexData(const void *vertices, int vertexCount, size_t vertexSize)
void dropAllRefs() SK_EXCLUDES(fSpinLock)
auto make(Ctor &&ctor) -> decltype(ctor(nullptr))
Definition: SkArenaAlloc.h:120
uint32_t uint32_t * format
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not defaults to or::depending on whether ipv6 is specified vm service A custom Dart VM Service port The default is to pick a randomly available open port disable vm Disable the Dart VM Service The Dart VM Service is never available in release mode disable vm service Disable mDNS Dart VM Service publication Bind to the IPv6 localhost address for the Dart VM Service Ignored if vm service host is set endless trace buffer
Definition: switches.h:126
Definition: SkSize.h:16
std::shared_ptr< const fml::Mapping > data
Definition: texture_gles.cc:63