                                   const PathRendererChain::Options& optionsForPathRendererChain,
                                   bool reduceOpsTaskSplitting)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
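
// Teardown (likely the ~GrDrawingManager() destructor, judging by the two calls
// below): close every remaining task, then remove them all from the DAG.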
    this->closeAllTasks();
    this->removeRenderTasks();
bool GrDrawingManager::wasAbandoned() const {

    for (int i = fOnFlushCBObjects.size() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {

    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
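
// GrDrawingManager::flush(), sketched by the fragments below: bail out early
// (reporting failure through the GrFlushInfo callbacks) if a flush is already in
// progress or the context was abandoned, optionally short-circuit when none of
// the requested proxies are used by any task, then close all tasks, run the
// onFlush callbacks, plan and assign GPU resources with the resource allocator,
// execute the DAG, and purge the resource cache if anything was executed.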
    if (fFlushing || this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);

    SkDEBUGCODE(this->validate());

    if (!proxies.empty() && !info.fNumSemaphores && !info.fFinishedProc &&

        bool used = std::any_of(fDAG.begin(), fDAG.end(), [&](auto& task) {
            return task && task->isUsed(proxy);

        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, true);
    auto dContext = fContext->asDirectContext();

    dContext->priv().clientMappedBufferManager()->process();

    GrGpu* gpu = dContext->priv().getGpu();

    auto resourceProvider = dContext->priv().resourceProvider();
    auto resourceCache = dContext->priv().getResourceCache();

    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    if (!fCpuBufferCache) {
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    bool preFlushSuccessful = true;
        preFlushSuccessful &= onFlushCBObject->preFlush(&onFlushProvider);

    bool cachePurgeNeeded = false;

    if (preFlushSuccessful) {
        bool usingReorderedDAG = false;

        if (fReduceOpsTaskSplitting) {
            usingReorderedDAG = this->reorderTasks(&resourceAllocator);
            if (!usingReorderedDAG) {
                resourceAllocator.reset();

        for (const auto& task : fDAG) {

        if (!resourceAllocator.failedInstantiation()) {
            if (!usingReorderedDAG) {
                for (const auto& task : fDAG) {
                    task->gatherProxyIntervals(&resourceAllocator);
                resourceAllocator.planAssignment();
            resourceAllocator.assign();

        cachePurgeNeeded = !resourceAllocator.failedInstantiation() &&
                           this->executeRenderTasks(&flushState);

    this->removeRenderTasks();

    if (cachePurgeNeeded) {
        resourceCache->purgeAsNeeded();
        cachePurgeNeeded = false;

        onFlushCBObject->postFlush(fTokenTracker.nextFlushToken());
        cachePurgeNeeded = true;

    if (cachePurgeNeeded) {
        resourceCache->purgeAsNeeded();

bool GrDrawingManager::submitToGpu(GrSyncCpu sync) {
    if (fFlushing || this->wasAbandoned()) {

    GrGpu* gpu = direct->priv().getGpu();
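
// executeRenderTasks() makes two passes over the DAG: a prepare() pass over
// every instantiated task, then an execute() pass that issues the actual GPU
// work. The counter resets every kMaxRenderTasksBeforeFlush executed tasks,
// presumably so the backend can flush and keep command buffers bounded.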
bool GrDrawingManager::executeRenderTasks(GrOpFlushState* flushState) {
#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing %d opsTasks\n", fDAG.size());
    for (int i = 0; i < fDAG.size(); ++i) {
        label.printf("task %d/%d", i, fDAG.size());
        fDAG[i]->dump(label, {}, true, true);

    bool anyRenderTasksExecuted = false;

    for (const auto& renderTask : fDAG) {
        if (!renderTask || !renderTask->isInstantiated()) {

        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);

    static constexpr int kMaxRenderTasksBeforeFlush = 100;
    int numRenderTasksExecuted = 0;

    for (const auto& renderTask : fDAG) {
        if (!renderTask->isInstantiated()) {

        if (renderTask->execute(flushState)) {
            anyRenderTasksExecuted = true;

        if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            numRenderTasksExecuted = 0;

    return anyRenderTasksExecuted;
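
// removeRenderTasks() tears down the DAG after a flush: tasks that are still
// shared or that require explicit cleanup get endFlush(this), and the
// reorder-blocker indices plus the last-render-task map are cleared.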
void GrDrawingManager::removeRenderTasks() {
    for (const auto& task : fDAG) {
        if (!task->unique() || task->requiresExplicitCleanup()) {
            task->endFlush(this);

    fReorderBlockerTaskIndices.clear();
    fLastRenderTasks.reset();
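
// sortTasks() topologically sorts each span of fDAG that lies between
// reorder-blocking tasks (GrTTopoSort with GrRenderTask::TopoSortTraits). The
// SkASSERTs check that no task inside a span blocks reordering, that each
// span's dependencies come from the span itself or earlier work, and that no
// two adjacent OpsTasks in a sorted span could still have been merged.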
void GrDrawingManager::sortTasks() {
        end = i == fReorderBlockerTaskIndices.size() ? fDAG.size() : fReorderBlockerTaskIndices[i];

        SkASSERT(std::none_of(span.begin(), span.end(), [](const auto& t) {
            return t->blocksReordering();
        SkASSERT(span.end() == fDAG.end() || fDAG[end]->blocksReordering());

            SkASSERT(GrRenderTask::TopoSortTraits::WasOutput(task) ||
                     std::find_if(span.begin(), span.end(), [task](const auto& n) {
                         return n.get() == task; }));
            for (int i = 0; i < task->fDependencies.size(); ++i) {
        for (const auto& node : span) {

        bool sorted = GrTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(span, start);

        if (sorted && !span.empty()) {
            auto prevOpsTask = span[0]->asOpsTask();
            for (size_t j = 1; j < span.size(); ++j) {
                auto curOpsTask = span[j]->asOpsTask();

                if (prevOpsTask && curOpsTask) {
                    SkASSERT(!prevOpsTask->canMerge(curOpsTask));

                prevOpsTask = curOpsTask;
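
// reorder_array_by_llist() rewrites the task array in linked-list order.
// reorderTasks() clusters each reorderable span with GrClusterRenderTasks,
// splices the reorder-blocking task back onto the tail of each sub-list, and
// appears to fall back to the unreordered DAG when the reordering goes over
// the memory budget (counted via incNumReorderedDAGsOverBudget). The loop that
// follows merges adjacent OpsTasks (mergeFrom), disowns the merged-away tasks,
// and compacts fDAG with resize_back().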
    [[maybe_unused]] T* old = array->at(i).release();
    array->at(i++).reset(t);

    bool clustered = false;
        end = i == fReorderBlockerTaskIndices.size() ? fDAG.size() : fReorderBlockerTaskIndices[i];

        SkASSERT(std::none_of(span.begin(), span.end(), [](const auto& t) {
            return t->blocksReordering();

        if (i < fReorderBlockerTaskIndices.size()) {
            SkASSERT(fDAG[fReorderBlockerTaskIndices[i]]->blocksReordering());
            subllist.addToTail(fDAG[fReorderBlockerTaskIndices[i]].get());

        llist.concat(std::move(subllist));

    dContext->priv().getGpu()->stats()->incNumReorderedDAGsOverBudget();

    for (int i = 0; i < fDAG.size(); i++) {
        size_t remaining = fDAG.size() - i - 1;

            int removeCount = opsTask->mergeFrom(nextTasks);
            for (const auto& removed : nextTasks.first(removeCount)) {
                removed->disown(this);

        fDAG[newCount++] = std::move(task);

    fDAG.resize_back(newCount);
void GrDrawingManager::closeAllTasks() {
    for (auto& task : fDAG) {
            task->makeClosed(fContext);
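
// Task-insertion helpers: appending pushes onto the back of fDAG; inserting
// before the last task swaps the new entry with the current back (bumping a
// trailing reorder-blocker index if it points at the end); tasks that block
// reordering record their index in fReorderBlockerTaskIndices before being
// appended.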
    return fDAG.push_back(std::move(task)).get();

    if (!fReorderBlockerTaskIndices.empty() && fReorderBlockerTaskIndices.back() == fDAG.size()) {
        fReorderBlockerTaskIndices.back()++;

    fDAG.push_back(std::move(task));
    auto& penultimate = fDAG.fromBack(1);
    fDAG.back().swap(penultimate);
    return penultimate.get();

        fReorderBlockerTaskIndices.push_back(fDAG.size());

    return fDAG.push_back(std::move(task)).get();
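
// resolve_and_mipmap() (static helper): before a proxy's contents are read,
// resolve any dirty MSAA on its render target and regenerate dirty mipmap
// levels on its texture, then mark both clean.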
    if (rtProxy->isMSAADirty()) {
        SkASSERT(rtProxy->peekRenderTarget());
        rtProxy->markMSAAResolved();

    if (textureProxy->mipmapsAreDirty()) {
        SkASSERT(textureProxy->peekTexture());
        textureProxy->markMipmapsClean();
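
// flushSurfaces(): if the context was abandoned, report failure through the
// GrFlushInfo callbacks and return; otherwise flush(), then treat the result as
// unsubmittable when nothing flushed or the backend lacks semaphore support
// while semaphores were requested.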
    if (this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);

    SkDEBUGCODE(this->validate());

    GrGpu* gpu = direct->priv().getGpu();

    bool didFlush = this->flush(proxies, access, info, newState);

    SkDEBUGCODE(this->validate());

    if (!didFlush || (!direct->priv().caps()->backendSemaphoreSupport() && info.fNumSemaphores)) {

    fOnFlushCBObjects.push_back(onFlushCBObject);

#if defined(GR_TEST_UTILS)
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.size());
    fOnFlushCBObjects.removeShuffle(n);
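
// setLastRenderTask()/getLastRenderTask() maintain a proxy -> render-task map
// (fLastRenderTasks); the previous entry must already be closed before it is
// replaced. getLastOpsTask() narrows the result to an OpsTask via asOpsTask().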
    SkASSERT(prior->isClosed() || prior == task);

        fLastRenderTasks.set(key, task);
    } else if (fLastRenderTasks.find(key)) {

    return entry ? *entry : nullptr;

    return task ? task->asOpsTask() : nullptr;
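
// DDL support: moveRenderTasksToDDL() closes everything, swaps the recorded DAG
// into the deferred display list, and disowns and pre-prepares each task;
// createDDLTask() points the DDL's lazy proxy at the replay target and wraps
// the list in a GrDDLTask appended to this drawing manager.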
    SkDEBUGCODE(this->validate());

    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    fDAG.swap(ddl->fRenderTasks);
    fReorderBlockerTaskIndices.clear();

    for (auto& renderTask : ddl->fRenderTasks) {
        renderTask->disown(this);
        renderTask->prePrepare(fContext);

    SkDEBUGCODE(this->validate());

    SkDEBUGCODE(this->validate());

    if (fActiveOpsTask) {
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;

    ddl->fLazyProxyData->fReplayDest = newDest.get();

    SkDEBUGCODE(auto ddlTask =) this->appendTask(sk_make_sp<GrDDLTask>(this,

    SkDEBUGCODE(this->validate());
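
// validate() (debug-only invariants): the active OpsTask, when present, must be
// the open task at the back of fDAG; every other task must be closed unless it
// is the active task's texture-resolve task or an atlas task.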
void GrDrawingManager::validate() const {
    if (fActiveOpsTask) {
        SkASSERT(fActiveOpsTask == fDAG.back().get());

    for (int i = 0; i < fDAG.size(); ++i) {
        if (fActiveOpsTask != fDAG[i].get()) {
            bool isActiveResolveTask =
                    fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG[i].get();
            SkASSERT(isActiveResolveTask || isAtlas || fDAG[i]->isClosed());

            SkASSERT(fActiveOpsTask == nullptr);
        } else if (fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpsTask == nullptr);

            SkASSERT(fActiveOpsTask == fDAG.back().get());

        SkASSERT(fActiveOpsTask == nullptr);

void GrDrawingManager::closeActiveOpsTask() {
    if (fActiveOpsTask) {
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
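
// newOpsTask() closes the currently active OpsTask, builds a new
// skgpu::ganesh::OpsTask for the given surface view, appends it to the DAG, and
// makes it the active task.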
    SkDEBUGCODE(this->validate());

    this->closeActiveOpsTask();

            this, std::move(surfaceView), fContext->priv().auditTrail(), std::move(arenas)));

    this->appendTask(opsTask);

    fActiveOpsTask = opsTask.get();

    SkDEBUGCODE(this->validate());
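
// addAtlasTask() closes the previous atlas task and each of its users (clearing
// fActiveOpsTask if it was one of them), then inserts the new atlas task just
// before the last task in the DAG.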
    SkDEBUGCODE(this->validate());

    if (previousAtlasTask) {
        previousAtlasTask->makeClosed(fContext);

            previousAtlasUser->makeClosed(fContext);
            if (previousAtlasUser == fActiveOpsTask) {
                fActiveOpsTask = nullptr;

    this->insertTaskBeforeLast(std::move(atlasTask));

    SkDEBUGCODE(this->validate());
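
// Texture-resolve tasks come in two flavors: newTextureResolveRenderTaskBefore()
// inserts one before the last task in the DAG, while newTextureResolveRenderTask()
// closes the active OpsTask, adds the proxy to a fresh GrTextureResolveRenderTask,
// appends it, and closes it immediately.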
    GrRenderTask* task = this->insertTaskBeforeLast(sk_make_sp<GrTextureResolveRenderTask>());

    SkDEBUGCODE(this->validate());

    SkDEBUGCODE(this->validate());

    SkDEBUGCODE(this->validate());

    this->closeActiveOpsTask();

    auto resolveTask = sk_make_sp<GrTextureResolveRenderTask>();
    resolveTask->addProxy(this, std::move(proxy), flags, caps);

    auto task = this->appendTask(std::move(resolveTask));
    task->makeClosed(fContext);

    SkDEBUGCODE(this->validate());
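
// newWaitRenderTask(): if the active OpsTask already targets the proxy being
// waited on, the wait task is inserted before it and inherits its dependencies
// (addDependenciesFromOtherTask); otherwise it depends on the proxy's last
// render task (lastTask), the active OpsTask is closed, and the wait task is
// appended. Either way the wait task is closed right away.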
                                         std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,

    SkDEBUGCODE(this->validate());

                                                                    std::move(semaphores),

    if (fActiveOpsTask && (fActiveOpsTask->target(0) == proxy.get())) {
        this->insertTaskBeforeLast(waitTask);

        waitTask->addDependenciesFromOtherTask(fActiveOpsTask);

            waitTask->addDependency(lastTask);

        this->closeActiveOpsTask();
        this->appendTask(waitTask);

    waitTask->makeClosed(fContext);

    SkDEBUGCODE(this->validate());
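
// The one-shot task factories below (transfer-from, buffer transfer, buffer
// update, copy, write-pixels) all follow the same shape: close the active
// OpsTask, append the new render task, and close it immediately; the copy path
// also rejects framebuffer-only sources.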
    SkDEBUGCODE(this->validate());
    this->closeActiveOpsTask();
    GrRenderTask* task = this->appendTask(sk_make_sp<GrTransferFromRenderTask>(
            srcProxy, srcRect, surfaceColorType, dstColorType,
            std::move(dstBuffer), dstOffset));
    task->makeClosed(fContext);
    SkDEBUGCODE(this->validate());

    SkDEBUGCODE(this->validate());
    this->closeActiveOpsTask();
    this->appendTask(task);
    task->makeClosed(fContext);
    SkDEBUGCODE(this->validate());

    SkDEBUGCODE(this->validate());
    this->closeActiveOpsTask();
    this->appendTask(task);
    task->makeClosed(fContext);
    SkDEBUGCODE(this->validate());

    SkDEBUGCODE(this->validate());
    if (src->framebufferOnly()) {
    this->closeActiveOpsTask();
    this->appendTask(task);
    task->makeClosed(fContext);
    SkDEBUGCODE(this->validate());

    SkDEBUGCODE(this->validate());
    this->closeActiveOpsTask();
    task->makeClosed(fContext);
    SkDEBUGCODE(this->validate());
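
// Path renderer accessors lazily construct the PathRendererChain from
// fOptionsForPathRendererChain. getPathRenderer() may fall back to the software
// path renderer when allowSW is set and nothing in the chain can handle the
// draw; the atlas and tessellation variants just delegate to the chain.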
    if (!fPathRendererChain) {
        fPathRendererChain =
                std::make_unique<PathRendererChain>(fContext, fOptionsForPathRendererChain);

    auto pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {

#if GR_PATH_RENDERER_SPEW
    SkDebugf("getPathRenderer: %s\n", pr->name());

    if (!fSoftwarePathRenderer) {

    return fSoftwarePathRenderer.get();

    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    return fPathRendererChain->getAtlasPathRenderer();

    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    return fPathRendererChain->getTessellationPathRenderer();
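
// flushIfNecessary(): when the resource cache has requested a flush, perform
// one and then purge the cache as needed.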
    if (resourceCache && resourceCache->requestsFlush()) {
        resourceCache->purgeAsNeeded();