GrResourceAllocator.h
/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrResourceAllocator_DEFINED
#define GrResourceAllocator_DEFINED

#include "src/core/SkTHash.h"

class GrDirectContext;

// Print out explicit allocation information
#define GR_ALLOCATION_SPEW 0

// Print out information about interval creation
#define GR_TRACK_INTERVAL_CREATION 0

/*
 * The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
 * being given the usage intervals of the various proxies. It keeps these intervals in a singly
 * linked list sorted by increasing start index. (It also maintains a hash table from proxyID
 * to interval to find proxy reuse.) The ResourceAllocator uses Registers (in the sense of register
 * allocation) to represent a future surface that will be used for each proxy during
 * `planAssignment`, and then assigns actual surfaces during `assign`.
 *
 * Note: the op indices (used in the usage intervals) come from the order of the ops in
 * their opsTasks after the opsTask DAG has been linearized.
 *
 * The planAssignment method traverses the sorted list and:
 *     moves intervals from the active list that have completed (returning their registers
 *     to the free pool) into the finished list (sorted by increasing start)
 *
 *     allocates a new register (preferably from the free pool) for the new interval
 *     adds the new interval to the active list (which is sorted by increasing end index)
 *
 * After assignment planning, the user can choose to call `makeBudgetHeadroom`, which:
 *     computes how much VRAM would be needed for new resources for all extant Registers
 *
 *     asks the resource cache to purge enough resources to get that much free space
 *
 *     if that's not possible, does nothing and returns false. The user may opt to reset
 *     the allocator and start over with a different DAG.
 *
 * If the user wants to commit to the current assignment plan, they call `assign`, which:
 *     instantiates lazy proxies
 *
 *     instantiates new surfaces for all registers that need them
 *
 *     assigns the surface for each register to all the proxies that will use it
 *
 *************************************************************************************************
 * How does instantiation failure handling work when explicitly allocating?
 *
 * In the gather-usage-intervals pass, all the GrSurfaceProxies used in the flush should be
 * gathered (i.e., in OpsTask::gatherProxyIntervals).
 *
 * During addInterval, read-only lazy proxies are instantiated. If that fails, the resource
 * allocator will note the failure and ignore pretty much everything else until `reset`.
 *
 * During planAssignment, fully-lazy proxies are instantiated so that we can know their size
 * for budgeting purposes. If this fails, planAssignment returns false.
 *
 * During assign, partially-lazy proxies are instantiated and new surfaces are created for all
 * other proxies. If any of these fails, assign returns false.
 *
 * The drawing manager will drop the flush if any proxies fail to instantiate.
 */
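
// A minimal sketch of the intended call sequence, kept in comment form since it is
// illustrative only. The real driver is the drawing manager at flush time;
// `proxyUses` and its fields are hypothetical names, not part of this file:
//
//     GrResourceAllocator alloc(dContext);
//     for (const ProxyUse& use : proxyUses) {        // from gatherProxyIntervals
//         alloc.addInterval(use.fProxy, use.fStart, use.fEnd,
//                           GrResourceAllocator::ActualUse::kYes,
//                           GrResourceAllocator::AllowRecycling::kYes);
//     }
//     if (!alloc.planAssignment()) {                 // a lazy proxy failed to instantiate
//         alloc.reset();
//     } else if (!alloc.makeBudgetHeadroom()) {      // over budget: try a different DAG
//         alloc.reset();
//     } else if (!alloc.assign()) {                  // instantiation failure
//         alloc.reset();                             // drawing manager drops the flush
//     }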
class GrResourceAllocator {
public:
    GrResourceAllocator(GrDirectContext* dContext)
            : fDContext(dContext) {}

    ~GrResourceAllocator();

    unsigned int curOp() const { return fNumOps; }
    void incOps() { fNumOps++; }

    /** Indicates whether a given call to addInterval represents an actual usage of the
     *  provided proxy. This is mainly here to accommodate deferred proxies attached to opsTasks.
     *  In that case we need to create an extra-long interval for them (due to the upload) but
     *  don't want to count that usage/reference towards the proxy's recyclability.
     */
    enum class ActualUse : bool {
        kNo  = false,
        kYes = true
    };
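
    // For instance (an assumed illustration, not code from this file): a deferred
    // proxy whose texture is uploaded before the opsTask executes gets an interval
    // spanning the whole task, but with ActualUse::kNo so that the upload alone
    // doesn't count against the proxy's recyclability:
    //
    //     alloc.addInterval(deferredProxy, taskStart, taskEnd,  // hypothetical bounds
    //                       GrResourceAllocator::ActualUse::kNo,
    //                       GrResourceAllocator::AllowRecycling::kYes);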

    /** Indicates whether we allow a GPU texture assigned to a register to be recycled or not.
     *  This comes up when dealing with Vulkan secondary command buffers (SCBs): offscreens
     *  sampled into the SCB must all be drawn before being sampled in the SCB, because the SCB
     *  gets submitted in a later command buffer. Thus offscreens cannot share an allocation, or
     *  later reuses would overwrite earlier ones.
     */
    enum class AllowRecycling : bool {
        kNo  = false,
        kYes = true
    };
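
    // Sketch of the Vulkan-SCB case (assumed usage; kNo simply pins the register to a
    // single proxy so a later interval cannot reuse its surface):
    //
    //     alloc.addInterval(offscreenSampledIntoScb, start, end,
    //                       GrResourceAllocator::ActualUse::kYes,
    //                       GrResourceAllocator::AllowRecycling::kNo);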

    // Add a usage interval from 'start' to 'end' inclusive. This is usually used for
    // renderTargets. If an interval already exists for the proxy, it will be expanded
    // to include the new range.
    void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end, ActualUse actualUse,
                     AllowRecycling SkDEBUGCODE(, bool isDirectDstRead = false));

    bool failedInstantiation() const { return fFailedInstantiation; }

    // Generate an internal plan for resource allocation. After this you can optionally call
    // `makeBudgetHeadroom` to check whether that plan would go over our memory budget.
    // Fully-lazy proxies are also instantiated at this point so that their size can
    // be known accurately. Returns false if any lazy proxy failed to instantiate, true otherwise.
    bool planAssignment();

    // Figure out how much VRAM headroom this plan requires. If there are enough purgeable
    // resources, purge them and return true. Otherwise return false.
    bool makeBudgetHeadroom();

    // Clear all internal state in preparation for a new set of intervals.
    void reset();

    // Instantiate and assign resources to all proxies.
    bool assign();

#if GR_ALLOCATION_SPEW
    void dumpIntervals();
#endif

private:
    class Interval;
    class Register;

    // Remove dead intervals from the active list.
    void expire(unsigned int curIndex);

    // These two methods wrap the interactions with the free pool.
    void recycleRegister(Register* r);
    Register* findOrCreateRegisterFor(GrSurfaceProxy* proxy);

    struct FreePoolTraits {
        static const skgpu::ScratchKey& GetKey(const Register& r) {
            return r.scratchKey();
        }

        static uint32_t Hash(const skgpu::ScratchKey& key) { return key.hash(); }
        static void OnFree(Register* r) { }
    };
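
    // How these traits are meant to be read (a sketch, not code from this file):
    // the free pool is a multimap from ScratchKey to Register, so recycling a
    // register files it under its scratch key, and a later interval with a
    // compatible key can pull it back out. Roughly:
    //
    //     fFreePool.insert(r->scratchKey(), r);       // recycleRegister()
    //     Register* r = fFreePool.find(scratchKey);   // findOrCreateRegisterFor()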

    typedef SkTMultiMap<Register, skgpu::ScratchKey, FreePoolTraits> FreePoolMultiMap;

    typedef SkTHashMap<uint32_t, Interval*> IntvlHash;

    struct UniqueKeyHash {
        uint32_t operator()(const skgpu::UniqueKey& key) const { return key.hash(); }
    };
    typedef SkTHashMap<skgpu::UniqueKey, Register*, UniqueKeyHash>
            UniqueKeyRegisterHash;

    // Each proxy – with some exceptions – is assigned a register. After all assignments are made,
    // another pass is performed to instantiate and assign actual surfaces to the proxies. Right
    // now these are performed in one call, but in the future they will be separable and the user
    // will be able to query re: memory cost before committing to surface creation.
    class Register {
    public:
        // It's OK to pass an invalid scratch key iff the proxy has a unique key.
        Register(GrSurfaceProxy* originatingProxy, skgpu::ScratchKey, GrResourceProvider*);

        const skgpu::ScratchKey& scratchKey() const { return fScratchKey; }
        const skgpu::UniqueKey& uniqueKey() const { return fOriginatingProxy->getUniqueKey(); }

        bool accountedForInBudget() const { return fAccountedForInBudget; }
        void setAccountedForInBudget() { fAccountedForInBudget = true; }

        GrSurface* existingSurface() const { return fExistingSurface.get(); }

        // Can this register be used by other proxies after this one?
        bool isRecyclable(const GrCaps&, GrSurfaceProxy* proxy, int knownUseCount,
                          AllowRecycling) const;

        // Resolve the register allocation to an actual GrSurface. 'fOriginatingProxy'
        // is used to cache the allocation when a given register is used by multiple
        // proxies.
        bool instantiateSurface(GrSurfaceProxy*, GrResourceProvider*);

        SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; })

    private:
        GrSurfaceProxy*   fOriginatingProxy;
        skgpu::ScratchKey fScratchKey;         // free pool wants a reference to this.
        sk_sp<GrSurface>  fExistingSurface;    // queried from resource cache. may be null.
        bool              fAccountedForInBudget = false;

#ifdef SK_DEBUG
        uint32_t fUniqueID;

        static uint32_t CreateUniqueID();
#endif
    };

    class Interval {
    public:
        Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end)
                : fProxy(proxy)
                , fStart(start)
                , fEnd(end) {
            SkASSERT(proxy);
            SkDEBUGCODE(fUniqueID = CreateUniqueID());
#if GR_TRACK_INTERVAL_CREATION
            SkString proxyStr = proxy->dump();
            SkDebugf("New intvl %d: %s [%d, %d]\n", fUniqueID, proxyStr.c_str(), start, end);
#endif
        }

        const GrSurfaceProxy* proxy() const { return fProxy; }
        GrSurfaceProxy* proxy() { return fProxy; }

        unsigned int start() const { return fStart; }
        unsigned int end() const { return fEnd; }

        void setNext(Interval* next) { fNext = next; }
        const Interval* next() const { return fNext; }
        Interval* next() { return fNext; }

        Register* getRegister() const { return fRegister; }
        void setRegister(Register* r) { fRegister = r; }

        void addUse() { fUses++; }
        int uses() const { return fUses; }

        void extendEnd(unsigned int newEnd) {
            if (newEnd > fEnd) {
#if GR_TRACK_INTERVAL_CREATION
                // Log before updating fEnd so the old value is actually printed.
                SkDebugf("intvl %d: extending from %d to %d\n", fUniqueID, fEnd, newEnd);
#endif
                fEnd = newEnd;
            }
        }

        void disallowRecycling() {
            fAllowRecycling = AllowRecycling::kNo;
        }
        AllowRecycling allowRecycling() const { return fAllowRecycling; }

        SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; })

    private:
        GrSurfaceProxy* fProxy;
        unsigned int    fStart;
        unsigned int    fEnd;
        Interval*       fNext = nullptr;
        unsigned int    fUses = 0;
        Register*       fRegister = nullptr;
        AllowRecycling  fAllowRecycling = AllowRecycling::kYes;

#ifdef SK_DEBUG
        uint32_t fUniqueID;

        static uint32_t CreateUniqueID();
#endif
    };

    class IntervalList {
    public:
        IntervalList() = default;
        // N.B. No need for a destructor – the arena allocator will clean up for us.

        bool empty() const {
            SkASSERT(SkToBool(fHead) == SkToBool(fTail));
            return !SkToBool(fHead);
        }
        const Interval* peekHead() const { return fHead; }
        Interval* peekHead() { return fHead; }
        Interval* popHead();
        void insertByIncreasingStart(Interval*);
        void insertByIncreasingEnd(Interval*);

    private:
        SkDEBUGCODE(void validate() const;)

        Interval* fHead = nullptr;
        Interval* fTail = nullptr;
    };
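
    // The list methods are defined in the .cpp. As an illustration of the invariant
    // they maintain, insertByIncreasingEnd could look roughly like this sketch
    // (assumed, not the actual implementation):
    //
    //     void IntervalList::insertByIncreasingEnd(Interval* intvl) {
    //         SkASSERT(!intvl->next());
    //         if (!fHead) {
    //             fHead = fTail = intvl;               // first entry
    //         } else if (intvl->end() < fHead->end()) {
    //             intvl->setNext(fHead);               // new head
    //             fHead = intvl;
    //         } else {
    //             Interval* prev = fHead;              // walk to the insertion point
    //             while (prev->next() && prev->next()->end() <= intvl->end()) {
    //                 prev = prev->next();
    //             }
    //             intvl->setNext(prev->next());
    //             prev->setNext(intvl);
    //             if (!intvl->next()) {
    //                 fTail = intvl;                   // appended at the tail
    //             }
    //         }
    //         SkDEBUGCODE(this->validate();)
    //     }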

    // Compositing use cases can create > 80 intervals.
    static const int kInitialArenaSize = 128 * sizeof(Interval);

    GrDirectContext*      fDContext;
    FreePoolMultiMap      fFreePool;          // Recently created/used GrSurfaces
    IntvlHash             fIntvlHash;         // All the intervals, hashed by proxyID

    IntervalList          fIntvlList;         // All the intervals sorted by increasing start
    IntervalList          fActiveIntvls;      // List of live intervals during assignment
                                              // (sorted by increasing end)
    IntervalList          fFinishedIntvls;    // All the completed intervals
                                              // (sorted by increasing start)
    UniqueKeyRegisterHash fUniqueKeyRegisters;
    unsigned int          fNumOps = 0;

    SkDEBUGCODE(bool fPlanned = false;)
    SkDEBUGCODE(bool fAssigned = false;)

    SkSTArenaAllocWithReset<kInitialArenaSize> fInternalAllocator; // intervals & registers
    bool fFailedInstantiation = false;
};

#endif // GrResourceAllocator_DEFINED