uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    // Relaxed ordering is sufficient; we only need distinct, nonzero IDs.
    do { id = nextID.fetch_add(1, std::memory_order_relaxed); } while (id == SK_InvalidUniqueID);
    return id;
}

uint32_t GrResourceAllocator::Register::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do { id = nextID.fetch_add(1, std::memory_order_relaxed); } while (id == SK_InvalidUniqueID);
    return id;
}
GrResourceAllocator::~GrResourceAllocator() {
    // Every interval should have been processed unless an instantiation failed along the way.
    SkASSERT(fFailedInstantiation || fIntvlList.empty());
}
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse, AllowRecycling allowRecycling
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(!fAssigned);  // No intervals should be added during or after assignment.

    // A read-only proxy must already refer to specific contents; if a lazy one fails to
    // instantiate here, record the failure and bail out.
    if (proxy->readOnly()) {
        if (proxy->isLazy() &&
            !proxy->priv().doLazyInstantiation(fDContext->priv().resourceProvider())) {
            fFailedInstantiation = true;
        }
        return;
    }

    uint32_t proxyID = proxy->uniqueID().asUInt();
    if (Interval** intvlPtr = fIntvlHash.find(proxyID)) {
        // Revise the interval for an existing use.
        Interval* intvl = *intvlPtr;
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            SkASSERT(0 == intvl->start());  // initial upload to a deferred proxy; may repeat
        } else if (isDirectDstRead) {
            SkASSERT(intvl->start() <= start && intvl->end() >= end);  // must be within interval
        } else {
            SkASSERT(intvl->end() <= start);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        if (allowRecycling == AllowRecycling::kNo) {
            intvl->disallowRecycling();
        }
        intvl->extendEnd(end);
        return;
    }

    Interval* newIntvl = fInternalAllocator.make<Interval>(proxy, start, end);
    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    if (allowRecycling == AllowRecycling::kNo) {
        newIntvl->disallowRecycling();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.set(proxyID, newIntvl);
}
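// Local helper used by Register below. The body is a minimal sketch inferred from the
// declarations that accompany this file (GrCaps::reuseScratchTextures(),
// GrSurfaceProxy::asRenderTargetProxy()): scratch reuse is allowed when the caps permit it or
// when the proxy is a render target.
static bool can_proxy_use_scratch(const GrCaps& caps, GrSurfaceProxy* proxy) {
    return caps.reuseScratchTextures() || proxy->asRenderTargetProxy();
}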
GrResourceAllocator::Register::Register(GrSurfaceProxy* originatingProxy,
                                        skgpu::ScratchKey scratchKey,
                                        GrResourceProvider* provider)
        : fOriginatingProxy(originatingProxy)
        , fScratchKey(std::move(scratchKey)) {
    SkASSERT(originatingProxy && !originatingProxy->isInstantiated());
    if (fScratchKey.isValid()) {
        if (can_proxy_use_scratch(*provider->caps(), originatingProxy)) {
            fExistingSurface = provider->findAndRefScratchTexture(
                    fScratchKey, /*label=*/"ResourceAllocatorRegister");
        }
    } else {
        SkASSERT(this->uniqueKey().isValid());
        fExistingSurface = provider->findByUniqueKey<GrSurface>(this->uniqueKey());
    }
}
bool GrResourceAllocator::Register::isRecyclable(const GrCaps& caps,
                                                 GrSurfaceProxy* proxy,
                                                 int knownUseCount,
                                                 AllowRecycling allowRecycling) const {
    if (allowRecycling == AllowRecycling::kNo) {
        return false;
    }
    if (!can_proxy_use_scratch(caps, proxy)) {
        return false;
    }
    if (!this->scratchKey().isValid()) {
        return false;  // no scratch key, no free pool
    }
    if (this->uniqueKey().isValid()) {
        return false;  // rely on the resource cache to hold onto uniquely-keyed surfaces
    }
    // If every ref on the proxy is known to the allocator, nothing else is holding onto it.
    return !proxy->refCntGreaterThan(knownUseCount);
}
bool GrResourceAllocator::Register::instantiateSurface(GrSurfaceProxy* proxy,
                                                       GrResourceProvider* resourceProvider) {
    SkASSERT(!proxy->peekSurface());

    sk_sp<GrSurface> newSurface;
    if (!fExistingSurface) {
        if (proxy == fOriginatingProxy) {
            newSurface = proxy->priv().createSurface(resourceProvider);
        } else {
            // Later proxies in this register share the originating proxy's surface.
            newSurface = sk_ref_sp(fOriginatingProxy->peekSurface());
        }
    }
    if (!fExistingSurface && !newSurface) {
        return false;
    }

    GrSurface* surface = newSurface ? newSurface.get() : fExistingSurface.get();
    // If the proxy is budgeted, make sure its backing surface is counted against the budget too.
    if (proxy->isBudgeted() == skgpu::Budgeted::kYes) {
        surface->resourcePriv().makeBudgeted();
    }
    // Propagate the proxy's unique key to the surface, if it has one.
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (!surface->getUniqueKey().isValid()) {
            resourceProvider->assignUniqueKeyToResource(uniqueKey, surface);
        }
    }
    proxy->priv().assign(fExistingSurface ? fExistingSurface : std::move(newSurface));
    return true;
}
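// The IntervalList helpers below implement a minimal singly-linked list. Intervals are moved
// between fIntvlList (sorted by increasing start), fActiveIntvls (sorted by increasing end), and
// fFinishedIntvls without any additional allocation.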
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) { fTail = nullptr; }
        temp->setNext(nullptr);
    }
    return temp;
}
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkASSERT(!intvl->next());

    if (!fHead) {
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // prepend
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // append
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // general case: walk to the first interval that starts after 'intvl'
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {}
        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }
}
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkASSERT(!intvl->next());

    if (!fHead) {
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // prepend
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // append
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // general case: walk to the first interval that ends after 'intvl'
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {}
        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }
}
#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {}

    SkASSERT(fTail == prev);
}
#endif
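// Find a register for the proxy to use: prefer the register already mapped to the proxy's unique
// key, then a recycled register from the free pool with a matching scratch key, and finally a
// freshly created one.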
GrResourceAllocator::Register* GrResourceAllocator::findOrCreateRegisterFor(GrSurfaceProxy* proxy) {
    auto resourceProvider = fDContext->priv().resourceProvider();

    // Uniquely-keyed proxies share a register keyed on their unique key; these registers never go
    // into the scratch free pool.
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (auto p = fUniqueKeyRegisters.find(uniqueKey)) {
            return *p;
        }
        Register* r = fInternalAllocator.make<Register>(proxy, skgpu::ScratchKey(),
                                                        resourceProvider);
        fUniqueKeyRegisters.set(uniqueKey, r);
        return r;
    }

    // Otherwise try to reuse a register from the free pool before making a new one.
    skgpu::ScratchKey scratchKey;
    proxy->priv().computeScratchKey(*fDContext->priv().caps(), &scratchKey);

    auto filter = [] (const Register* r) { return true; };
    if (Register* r = fFreePool.findAndRemove(scratchKey, filter)) {
        return r;
    }
    return fInternalAllocator.make<Register>(proxy, std::move(scratchKey), resourceProvider);
}
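// Remove any intervals that end before the current index. If an expired interval's register is
// recyclable, return it to the free pool so later intervals can reuse its surface.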
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* intvl = fActiveIntvls.popHead();
        SkASSERT(!intvl->next());

        Register* r = intvl->getRegister();
        if (r && r->isRecyclable(*fDContext->priv().caps(), intvl->proxy(), intvl->uses(),
                                 intvl->allowRecycling())) {
#if GR_ALLOCATION_SPEW
            SkDebugf("putting register %d back into pool\n", r->uniqueID());
#endif
            fFreePool.insert(r->scratchKey(), r);
        }
        fFinishedIntvls.insertByIncreasingStart(intvl);
    }
}
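// First pass: walk the intervals in increasing-start order, expiring the ones that have ended and
// picking a register for every proxy that will need a surface. No GPU surfaces are created here
// except for fully-lazy proxies, which are instantiated immediately.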
bool GrResourceAllocator::planAssignment() {
    fIntvlHash.reset();  // the interval hash isn't needed once all intervals are known

    SkASSERT(!fPlanned && !fAssigned);
    SkDEBUGCODE(fPlanned = true;)

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning %d ops\n", fNumOps);
    this->dumpIntervals();
#endif

    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fIntvlList.popHead()) {
        this->expire(cur->start());
        fActiveIntvls.insertByIncreasingEnd(cur);

        // Already-instantiated proxies don't need registers.
        if (cur->proxy()->isInstantiated()) {
            continue;
        }

        // Fully-lazy proxies are instantiated now; other lazy proxies wait until assign().
        if (cur->proxy()->isLazy()) {
            if (cur->proxy()->isFullyLazy()) {
                fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
                if (fFailedInstantiation) {
                    break;
                }
            }
            continue;
        }

        Register* r = this->findOrCreateRegisterFor(cur->proxy());
#if GR_ALLOCATION_SPEW
        SkDebugf("Assigning register %d to %d\n",
                 r->uniqueID(),
                 cur->proxy()->uniqueID().asUInt());
#endif
        SkASSERT(!cur->proxy()->peekSurface());
        cur->setRegister(r);
    }

    // Drain the active interval list so every interval ends up in fFinishedIntvls.
    this->expire(std::numeric_limits<unsigned int>::max());
    return !fFailedInstantiation;
}
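// Between planning and assignment, estimate how much new GPU memory the plan will require and ask
// the resource cache to purge enough to make room for it.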
bool GrResourceAllocator::makeBudgetHeadroom() {
    size_t additionalBytesNeeded = 0;
    for (Interval* cur = fFinishedIntvls.peekHead(); cur; cur = cur->next()) {
        GrSurfaceProxy* proxy = cur->proxy();
        if (skgpu::Budgeted::kNo == proxy->isBudgeted() || proxy->isInstantiated()) {
            continue;
        }
        if (proxy->isLazy()) {
            additionalBytesNeeded += proxy->gpuMemorySize();
            continue;
        }
        Register* r = cur->getRegister();
        if (!r->accountedForInBudget() && !r->existingSurface()) {
            additionalBytesNeeded += proxy->gpuMemorySize();
        }
        r->setAccountedForInBudget();
    }
    return fDContext->priv().getResourceCache()->purgeToMakeHeadroom(additionalBytesNeeded);
}
void GrResourceAllocator::reset() {
    // Note: fFailedInstantiation is intentionally not cleared; the allocator does not attempt to
    // recover from a failed instantiation.
    SkDEBUGCODE(fPlanned = false;)
    SkDEBUGCODE(fAssigned = false;)
    SkASSERT(fActiveIntvls.empty());
    fFinishedIntvls = IntervalList();
    fIntvlList = IntervalList();
    fIntvlHash.reset();
    fUniqueKeyRegisters.reset();
    fFreePool.reset();
    fInternalAllocator.reset();
}
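// Second pass: instantiate a surface for every planned interval, reusing each register's existing
// or newly created surface. Stops early if any instantiation fails.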
bool GrResourceAllocator::assign() {
    if (fFailedInstantiation) {
        return false;
    }
    SkASSERT(fPlanned && !fAssigned);
    SkDEBUGCODE(fAssigned = true;)

    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fFinishedIntvls.popHead()) {
        if (fFailedInstantiation) {
            break;
        }
        if (cur->proxy()->isInstantiated()) {
            continue;
        }
        if (cur->proxy()->isLazy()) {
            fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
            continue;
        }
        Register* r = cur->getRegister();
        fFailedInstantiation = !r->instantiateSurface(cur->proxy(), resourceProvider);
    }
    return !fFailedInstantiation;
}
#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range.
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - refProxys:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the usage intervals.
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif
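// Typical call sequence, shown only as a hedged sketch: the real driver is Skia's
// GrDrawingManager, and the opsTask/gatherProxyIntervals names below are assumptions about the
// surrounding code rather than part of this file.
//
//     GrResourceAllocator alloc(dContext);
//     for (const auto& opsTask : opsTasks) {
//         opsTask->gatherProxyIntervals(&alloc);   // each proxy use calls alloc.addInterval(...)
//     }
//     if (alloc.planAssignment() && alloc.makeBudgetHeadroom()) {
//         alloc.assign();                          // attaches a GrSurface to every proxy
//     }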