GrDrawingManager.cpp
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/GrDrawingManager.h"

#include <algorithm>
#include <memory>

using namespace skia_private;

///////////////////////////////////////////////////////////////////////////////////////////////////
GrDrawingManager::GrDrawingManager(GrRecordingContext* rContext,
                                   const PathRendererChain::Options& optionsForPathRendererChain,
                                   bool reduceOpsTaskSplitting)
        : fContext(rContext)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
}

GrDrawingManager::~GrDrawingManager() {
    this->closeAllTasks();
    this->removeRenderTasks();
}

bool GrDrawingManager::wasAbandoned() const {
    return fContext->abandoned();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.size() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
}

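// Flushing walks the recorded DAG of GrRenderTasks: close any still-open tasks, topologically
// sort them (optionally clustering to reduce opsTask splitting), assign GPU resources, execute
// the tasks into command buffers, and finally notify the onFlush callback objects.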
// MDB TODO: make use of the 'proxies' parameter.
bool GrDrawingManager::flush(SkSpan<GrSurfaceProxy*> proxies,
                             SkSurfaces::BackendSurfaceAccess access,
                             const GrFlushInfo& info,
                             const skgpu::MutableTextureState* newState) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return false;
    }

    SkDEBUGCODE(this->validate());

    // As of now we only short-circuit if we got an explicit list of surfaces to flush.
    if (!proxies.empty() && !info.fNumSemaphores && !info.fFinishedProc &&
        access == SkSurfaces::BackendSurfaceAccess::kNoAccess && !newState) {
        bool allUnused = std::all_of(proxies.begin(), proxies.end(), [&](GrSurfaceProxy* proxy) {
            bool used = std::any_of(fDAG.begin(), fDAG.end(), [&](auto& task) {
                return task && task->isUsed(proxy);
            });
            return !used;
        });
        if (allUnused) {
            if (info.fSubmittedProc) {
                info.fSubmittedProc(info.fSubmittedContext, true);
            }
            return false;
        }
    }

    auto dContext = fContext->asDirectContext();
    SkASSERT(dContext);
    dContext->priv().clientMappedBufferManager()->process();

    GrGpu* gpu = dContext->priv().getGpu();
    // We have a non-abandoned, direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    fFlushing = true;

    auto resourceProvider = dContext->priv().resourceProvider();
    auto resourceCache = dContext->priv().getResourceCache();

    // Usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs to
    // flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
    // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace
    // them if the SkGpuDevice(s) write to them again.
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    GrOnFlushResourceProvider onFlushProvider(this);

    // Prepare any onFlush op lists (e.g. atlases).
    bool preFlushSuccessful = true;
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        preFlushSuccessful &= onFlushCBObject->preFlush(&onFlushProvider);
    }

    bool cachePurgeNeeded = false;

    if (preFlushSuccessful) {
        bool usingReorderedDAG = false;
        GrResourceAllocator resourceAllocator(dContext);
        if (fReduceOpsTaskSplitting) {
            usingReorderedDAG = this->reorderTasks(&resourceAllocator);
            if (!usingReorderedDAG) {
                resourceAllocator.reset();
            }
        }

#if 0
        // Enable this to print out verbose GrOp information
        SkDEBUGCODE(SkDebugf("RenderTasks (%d):\n", fDAG.count()));
        for (const auto& task : fDAG) {
            SkDEBUGCODE(task->dump(/* printDependencies */ true);)
        }
#endif

        if (!resourceAllocator.failedInstantiation()) {
            if (!usingReorderedDAG) {
                for (const auto& task : fDAG) {
                    SkASSERT(task);
                    task->gatherProxyIntervals(&resourceAllocator);
                }
                resourceAllocator.planAssignment();
            }
            resourceAllocator.assign();
        }

        cachePurgeNeeded = !resourceAllocator.failedInstantiation() &&
                           this->executeRenderTasks(&flushState);
    }
    this->removeRenderTasks();

    gpu->executeFlushInfo(proxies, access, info, newState);

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (cachePurgeNeeded) {
        resourceCache->purgeAsNeeded();
        cachePurgeNeeded = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextFlushToken());
        cachePurgeNeeded = true;
    }
    if (cachePurgeNeeded) {
        resourceCache->purgeAsNeeded();
    }
    fFlushing = false;

    return true;
}

bool GrDrawingManager::submitToGpu(GrSyncCpu sync) {
    if (fFlushing || this->wasAbandoned()) {
        return false;
    }

    auto direct = fContext->asDirectContext();
    if (!direct) {
        return false; // Can't submit while DDL recording
    }
    GrGpu* gpu = direct->priv().getGpu();
    return gpu->submitToGpu(sync);
}

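// Executes the sorted DAG in two passes: first each instantiated task gets a chance to prepare
// (record uploads and other CPU-side work), then each task is executed into command buffers,
// submitting periodically so the backend doesn't accumulate too many pending render passes.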
bool GrDrawingManager::executeRenderTasks(GrOpFlushState* flushState) {
#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing %d opsTasks\n", fDAG.size());
    for (int i = 0; i < fDAG.size(); ++i) {
        if (fDAG[i]) {
            SkString label;
            label.printf("task %d/%d", i, fDAG.size());
            fDAG[i]->dump(label, {}, true, true);
        }
    }
#endif

    bool anyRenderTasksExecuted = false;

    for (const auto& renderTask : fDAG) {
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of
    // resources for each command buffer associated with the oplists. If this gets too large we
    // can cause the devices to go OOM. In practice we usually only hit this case in our tests,
    // but to be safe we put a cap on the number of oplists we will execute before flushing to
    // the GPU to relieve some memory pressure.
    static constexpr int kMaxRenderTasksBeforeFlush = 100;
    int numRenderTasksExecuted = 0;

    // Execute the normal op lists.
    for (const auto& renderTask : fDAG) {
        SkASSERT(renderTask);
        if (!renderTask->isInstantiated()) {
            continue;
        }

        if (renderTask->execute(flushState)) {
            anyRenderTasksExecuted = true;
        }
        if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(GrSyncCpu::kNo);
            numRenderTasksExecuted = 0;
        }
    }

    SkASSERT(!flushState->opsRenderPass());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextFlushToken());

    // We reset the flush state before the RenderTasks so that the last resources to be freed are
    // those that are written to in the RenderTasks. This helps to make sure the most recently
    // used resources are the last to be purged by the resource cache.
    flushState->reset();

    return anyRenderTasksExecuted;
}

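// Drops every task in the DAG once a flush has consumed them (or on teardown), letting each one
// release or disown its resources first.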
void GrDrawingManager::removeRenderTasks() {
    for (const auto& task : fDAG) {
        SkASSERT(task);
        if (!task->unique() || task->requiresExplicitCleanup()) {
            // TODO: Eventually uniqueness should be guaranteed: http://skbug.com/7111.
            // DDLs, however, will always require an explicit notification for when they
            // can clean up resources.
            task->endFlush(this);
        }
        task->disown(this);
    }
    fDAG.clear();
    fReorderBlockerTaskIndices.clear();
    fLastRenderTasks.reset();
}

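// Topologically sorts the DAG so that every task appears after its dependencies. Tasks that
// block reordering (recorded in fReorderBlockerTaskIndices) act as fences: each span between
// consecutive blockers is sorted independently.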
void GrDrawingManager::sortTasks() {
    // We separately sort the ranges around non-reorderable tasks.
    for (size_t i = 0, start = 0, end; start < SkToSizeT(fDAG.size()); ++i, start = end + 1) {
        end = i == fReorderBlockerTaskIndices.size() ? fDAG.size() : fReorderBlockerTaskIndices[i];
        SkSpan span(fDAG.begin() + start, end - start);

        SkASSERT(std::none_of(span.begin(), span.end(), [](const auto& t) {
            return t->blocksReordering();
        }));
        SkASSERT(span.end() == fDAG.end() || fDAG[end]->blocksReordering());

#if defined(SK_DEBUG)
        // In order to partition the dag array like this it must be the case that each partition
        // only depends on nodes in the partition or earlier partitions.
        auto check = [&](const GrRenderTask* task, auto&& check) -> void {
            SkASSERT(GrRenderTask::TopoSortTraits::WasOutput(task) ||
                     std::find_if(span.begin(), span.end(), [task](const auto& n) {
                         return n.get() == task; }));
            for (int i = 0; i < task->fDependencies.size(); ++i) {
                check(task->fDependencies[i], check);
            }
        };
        for (const auto& node : span) {
            check(node.get(), check);
        }
#endif

        bool sorted = GrTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(span, start);
        if (!sorted) {
            SkDEBUGFAIL("Render task topo sort failed.");
        }

#ifdef SK_DEBUG
        if (sorted && !span.empty()) {
            // This block checks for any unnecessary splits in the opsTasks. If two sequential
            // opsTasks could have merged it means the opsTask was artificially split.
            auto prevOpsTask = span[0]->asOpsTask();
            for (size_t j = 1; j < span.size(); ++j) {
                auto curOpsTask = span[j]->asOpsTask();

                if (prevOpsTask && curOpsTask) {
                    SkASSERT(!prevOpsTask->canMerge(curOpsTask));
                }

                prevOpsTask = curOpsTask;
            }
        }
#endif
    }
}

// Reorder the array to match the llist without reffing & unreffing sk_sp's.
// Both args must contain the same objects.
// This is basically a shim because clustering uses LList but the rest of drawmgr uses array.
template <typename T>
static void reorder_array_by_llist(const SkTInternalLList<T>& llist, TArray<sk_sp<T>>* array) {
    int i = 0;
    for (T* t : llist) {
        // Release the pointer that used to live here so it doesn't get unreffed.
        [[maybe_unused]] T* old = array->at(i).release();
        array->at(i++).reset(t);
    }
    SkASSERT(i == array->size());
}

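// Attempts to cluster compatible tasks (again fenced by reorder blockers) so that adjacent
// opsTasks targeting the same surface can be merged. Returns false if clustering found nothing
// to improve, if resource planning fails, or if the reordered DAG would not fit in the memory
// budget.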
bool GrDrawingManager::reorderTasks(GrResourceAllocator* resourceAllocator) {
    SkASSERT(fReduceOpsTaskSplitting);
    // We separately sort the ranges around non-reorderable tasks.
    bool clustered = false;
    SkTInternalLList<GrRenderTask> llist;
    for (size_t i = 0, start = 0, end; start < SkToSizeT(fDAG.size()); ++i, start = end + 1) {
        end = i == fReorderBlockerTaskIndices.size() ? fDAG.size() : fReorderBlockerTaskIndices[i];
        SkSpan span(fDAG.begin() + start, end - start);
        SkASSERT(std::none_of(span.begin(), span.end(), [](const auto& t) {
            return t->blocksReordering();
        }));

        SkTInternalLList<GrRenderTask> subllist;
        if (GrClusterRenderTasks(span, &subllist)) {
            clustered = true;
        }

        if (i < fReorderBlockerTaskIndices.size()) {
            SkASSERT(fDAG[fReorderBlockerTaskIndices[i]]->blocksReordering());
            subllist.addToTail(fDAG[fReorderBlockerTaskIndices[i]].get());
        }
        llist.concat(std::move(subllist));
    }
    if (!clustered) {
        return false;
    }

    for (GrRenderTask* task : llist) {
        task->gatherProxyIntervals(resourceAllocator);
    }
    if (!resourceAllocator->planAssignment()) {
        return false;
    }
    if (!resourceAllocator->makeBudgetHeadroom()) {
        auto dContext = fContext->asDirectContext();
        SkASSERT(dContext);
        dContext->priv().getGpu()->stats()->incNumReorderedDAGsOverBudget();
        return false;
    }
    reorder_array_by_llist(llist, &fDAG);

    int newCount = 0;
    for (int i = 0; i < fDAG.size(); i++) {
        sk_sp<GrRenderTask>& task = fDAG[i];
        if (auto opsTask = task->asOpsTask()) {
            size_t remaining = fDAG.size() - i - 1;
            SkSpan<sk_sp<GrRenderTask>> nextTasks{fDAG.end() - remaining, remaining};
            int removeCount = opsTask->mergeFrom(nextTasks);
            for (const auto& removed : nextTasks.first(removeCount)) {
                removed->disown(this);
            }
            i += removeCount;
        }
        fDAG[newCount++] = std::move(task);
    }
    fDAG.resize_back(newCount);
    return true;
}

void GrDrawingManager::closeAllTasks() {
    for (auto& task : fDAG) {
        if (task) {
            task->makeClosed(fContext);
        }
    }
}

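// Inserts 'task' immediately before the most recently added task. This keeps the active opsTask
// (which must stay at the back of the DAG) in place while still letting helper tasks, such as
// texture resolves and atlas uploads, run ahead of it.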
GrRenderTask* GrDrawingManager::insertTaskBeforeLast(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    if (fDAG.empty()) {
        return fDAG.push_back(std::move(task)).get();
    }
    if (!fReorderBlockerTaskIndices.empty() && fReorderBlockerTaskIndices.back() == fDAG.size()) {
        fReorderBlockerTaskIndices.back()++;
    }
    fDAG.push_back(std::move(task));
    auto& penultimate = fDAG.fromBack(1);
    fDAG.back().swap(penultimate);
    return penultimate.get();
}

GrRenderTask* GrDrawingManager::appendTask(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    if (task->blocksReordering()) {
        fReorderBlockerTaskIndices.push_back(fDAG.size());
    }
    return fDAG.push_back(std::move(task)).get();
}

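// Post-flush fixup for an explicitly flushed proxy: resolve any dirty MSAA and regenerate dirty
// mipmap levels so the backing texture is fully consistent when handed back to the client.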
static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
    if (!proxy->isInstantiated()) {
        return;
    }

    // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
    // because clients expect the flushed surface's backing texture to be fully resolved
    // upon return.
    if (proxy->requiresManualMSAAResolve()) {
        auto* rtProxy = proxy->asRenderTargetProxy();
        SkASSERT(rtProxy);
        if (rtProxy->isMSAADirty()) {
            SkASSERT(rtProxy->peekRenderTarget());
            gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect());
            rtProxy->markMSAAResolved();
        }
    }
    // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
    // case their backend textures are being stolen.
    // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
    // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag.
    if (auto* textureProxy = proxy->asTextureProxy()) {
        if (textureProxy->mipmapsAreDirty()) {
            SkASSERT(textureProxy->peekTexture());
            gpu->regenerateMipMapLevels(textureProxy->peekTexture());
            textureProxy->markMipmapsClean();
        }
    }
}

GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(SkSpan<GrSurfaceProxy*> proxies,
                                                      SkSurfaces::BackendSurfaceAccess access,
                                                      const GrFlushInfo& info,
                                                      const skgpu::MutableTextureState* newState) {
    if (this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());

    auto direct = fContext->asDirectContext();
    SkASSERT(direct);
    GrGpu* gpu = direct->priv().getGpu();
    // We have a non-abandoned, direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    // TODO: It is important to upgrade the drawingmanager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    bool didFlush = this->flush(proxies, access, info, newState);
    for (GrSurfaceProxy* proxy : proxies) {
        resolve_and_mipmap(gpu, proxy);
    }

    SkDEBUGCODE(this->validate());

    if (!didFlush || (!direct->priv().caps()->backendSemaphoreSupport() && info.fNumSemaphores)) {
        return GrSemaphoresSubmitted::kNo;
    }
    return GrSemaphoresSubmitted::kYes;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if defined(GR_TEST_UTILS)
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.size());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif

void GrDrawingManager::setLastRenderTask(const GrSurfaceProxy* proxy, GrRenderTask* task) {
#ifdef SK_DEBUG
    if (auto prior = this->getLastRenderTask(proxy)) {
        SkASSERT(prior->isClosed() || prior == task);
    }
#endif
    uint32_t key = proxy->uniqueID().asUInt();
    if (task) {
        fLastRenderTasks.set(key, task);
    } else if (fLastRenderTasks.find(key)) {
        fLastRenderTasks.remove(key);
    }
}

GrRenderTask* GrDrawingManager::getLastRenderTask(const GrSurfaceProxy* proxy) const {
    auto entry = fLastRenderTasks.find(proxy->uniqueID().asUInt());
    return entry ? *entry : nullptr;
}

skgpu::ganesh::OpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
    GrRenderTask* task = this->getLastRenderTask(proxy);
    return task ? task->asOpsTask() : nullptr;
}

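// Detaches everything recorded so far into the given deferred display list: the DAG is closed,
// sorted, and handed off to the DDL along with the recording context's arenas and program data.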
void GrDrawingManager::moveRenderTasksToDDL(GrDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no renderTask should receive a new command after this
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    fDAG.swap(ddl->fRenderTasks);
    SkASSERT(fDAG.empty());
    fReorderBlockerTaskIndices.clear();

    for (auto& renderTask : ddl->fRenderTasks) {
        renderTask->disown(this);
        renderTask->prePrepare(fContext);
    }

    ddl->fArenas = std::move(fContext->priv().detachArenas());

    fContext->priv().detachProgramData(&ddl->fProgramData);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::createDDLTask(sk_sp<const GrDeferredDisplayList> ddl,
                                     sk_sp<GrRenderTargetProxy> newDest) {
    SkDEBUGCODE(this->validate());

    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
    }

    // Propagate the DDL proxy's state information to the replay target.
    if (ddl->priv().targetProxy()->isMSAADirty()) {
        auto nativeRect = GrNativeRect::MakeIRectRelativeTo(
                ddl->characterization().origin(),
                ddl->priv().targetProxy()->backingStoreDimensions().height(),
                ddl->priv().targetProxy()->msaaDirtyRect());
        newDest->markMSAADirty(nativeRect);
    }
    GrTextureProxy* newTextureProxy = newDest->asTextureProxy();
    if (newTextureProxy && skgpu::Mipmapped::kYes == newTextureProxy->mipmapped()) {
        newTextureProxy->markMipmapsDirty();
    }

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the DDL opsTasks) will then steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest.get();

    // Add a task to handle drawing and lifetime management of the DDL.
    SkDEBUGCODE(auto ddlTask =) this->appendTask(sk_make_sp<GrDDLTask>(this,
                                                                       std::move(newDest),
                                                                       std::move(ddl)));
    SkASSERT(ddlTask->isClosed());

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
    if (fActiveOpsTask) {
        SkASSERT(!fDAG.empty());
        SkASSERT(!fActiveOpsTask->isClosed());
        SkASSERT(fActiveOpsTask == fDAG.back().get());
    }

    for (int i = 0; i < fDAG.size(); ++i) {
        if (fActiveOpsTask != fDAG[i].get()) {
            // The resolveTask associated with the activeTask remains open for as long as the
            // activeTask does.
            bool isActiveResolveTask =
                    fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG[i].get();
            bool isAtlas = fDAG[i]->isSetFlag(GrRenderTask::kAtlas_Flag);
            SkASSERT(isActiveResolveTask || isAtlas || fDAG[i]->isClosed());
        }
    }

    // The active opsTask, if any, should always be at the back of the DAG.
    if (!fDAG.empty()) {
        if (fDAG.back()->isSetFlag(GrRenderTask::kAtlas_Flag)) {
            SkASSERT(fActiveOpsTask == nullptr);
            SkASSERT(!fDAG.back()->isClosed());
        } else if (fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpsTask == nullptr);
        } else {
            SkASSERT(fActiveOpsTask == fDAG.back().get());
        }
    } else {
        SkASSERT(fActiveOpsTask == nullptr);
    }
}
#endif // SK_DEBUG

void GrDrawingManager::closeActiveOpsTask() {
    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
    }
}

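// Creates a new opsTask targeting 'surfaceView', appends it to the DAG, and makes it the active
// opsTask that subsequent draws will be recorded into.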
sk_sp<skgpu::ganesh::OpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
                                                           sk_sp<GrArenas> arenas) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<skgpu::ganesh::OpsTask> opsTask(new skgpu::ganesh::OpsTask(
            this, std::move(surfaceView), fContext->priv().auditTrail(), std::move(arenas)));

    SkASSERT(this->getLastRenderTask(opsTask->target(0)) == opsTask.get());

    this->appendTask(opsTask);

    fActiveOpsTask = opsTask.get();

    SkDEBUGCODE(this->validate());
    return opsTask;
}

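// Adds an atlas upload task just before the last task in the DAG. If a previous atlas task
// exists, the new one is made to depend on every user of the old atlas so only one atlas
// generation is ever in flight at a time.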
void GrDrawingManager::addAtlasTask(sk_sp<GrRenderTask> atlasTask,
                                    GrRenderTask* previousAtlasTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (previousAtlasTask) {
        previousAtlasTask->makeClosed(fContext);
        for (GrRenderTask* previousAtlasUser : previousAtlasTask->dependents()) {
            // Make the new atlas depend on everybody who used the old atlas, and close their
            // tasks. This guarantees that the previous atlas is totally out of service before
            // we render the next one, meaning there is only ever one atlas active at a time and
            // that they can all share the same texture.
            atlasTask->addDependency(previousAtlasUser);
            previousAtlasUser->makeClosed(fContext);
            if (previousAtlasUser == fActiveOpsTask) {
                fActiveOpsTask = nullptr;
            }
        }
    }

    atlasTask->setFlag(GrRenderTask::kAtlas_Flag);
    this->insertTaskBeforeLast(std::move(atlasTask));

    SkDEBUGCODE(this->validate());
}

GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTaskBefore(
        const GrCaps& caps) {
    // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we
    // are in sorting and opsTask reduction mode) the render tasks that depend on any proxy's
    // current state. This is because those opsTasks can still receive new ops and because if
    // they refer to the mipmapped version of 'proxy', they will then come to depend on the
    // render task being created here.
    //
    // Add the new textureResolveTask before the fActiveOpsTask (if not in
    // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
    // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
    GrRenderTask* task = this->insertTaskBeforeLast(sk_make_sp<GrTextureResolveRenderTask>());
    return static_cast<GrTextureResolveRenderTask*>(task);
}

void GrDrawingManager::newTextureResolveRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                                   GrSurfaceProxy::ResolveFlags flags,
                                                   const GrCaps& caps) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (!proxy->requiresManualMSAAResolve()) {
        SkDEBUGCODE(this->validate());
        return;
    }

    GrRenderTask* lastTask = this->getLastRenderTask(proxy.get());
    if (!proxy->asRenderTargetProxy()->isMSAADirty() && (!lastTask || lastTask->isClosed())) {
        SkDEBUGCODE(this->validate());
        return;
    }

    this->closeActiveOpsTask();

    auto resolveTask = sk_make_sp<GrTextureResolveRenderTask>();
    // Add proxy also adds all the dependencies we need
    resolveTask->addProxy(this, std::move(proxy), flags, caps);

    auto task = this->appendTask(std::move(resolveTask));
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

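// Adds a task that waits on the given semaphores before any later work that targets 'proxy' may
// execute. If the proxy is the active opsTask's target, the wait is spliced in just before it so
// the opsTask can stay open; otherwise the wait simply becomes the proxy's last render task.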
void GrDrawingManager::newWaitRenderTask(const sk_sp<GrSurfaceProxy>& proxy,
                                         std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
                                         int numSemaphores) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
                                                                    std::move(semaphores),
                                                                    numSemaphores);

    if (fActiveOpsTask && (fActiveOpsTask->target(0) == proxy.get())) {
        SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
        this->insertTaskBeforeLast(waitTask);
        // In this case we keep the current renderTask open but just insert the new waitTask
        // before it in the list. The waitTask will never need to trigger any resolves or mip
        // map generation which is the main advantage of going through the proxy version.
        // Additionally we would've had to temporarily set the wait task as the lastRenderTask
        // on the proxy, add the dependency, and then reset the lastRenderTask to
        // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
        // dependencies so that we don't unnecessarily reorder the waitTask before them.
        // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
        // semaphore even though they don't need to be for correctness.

        // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
        // get a circular self dependency of waitTask on waitTask.
        waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
        fActiveOpsTask->addDependency(waitTask.get());
    } else {
        // In this case we just close the previous RenderTask and append the waitTask to the
        // DAG. Since it is the last task now we call setLastRenderTask on the proxy. If there
        // is a lastTask on the proxy we make waitTask depend on that task. This dependency
        // isn't strictly needed but it does keep the DAG from reordering the waitTask earlier
        // and blocking more tasks.
        if (GrRenderTask* lastTask = this->getLastRenderTask(proxy.get())) {
            waitTask->addDependency(lastTask);
        }
        this->setLastRenderTask(proxy.get(), waitTask.get());
        this->closeActiveOpsTask();
        this->appendTask(waitTask);
    }
    waitTask->makeClosed(fContext);

    SkDEBUGCODE(this->validate());
}

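// Records a transfer of 'srcRect' from 'srcProxy' into 'dstBuffer' at 'dstOffset'. Only the base
// mip level is read, so the task does not depend on the proxy's full mip chain being valid.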
void GrDrawingManager::newTransferFromRenderTask(const sk_sp<GrSurfaceProxy>& srcProxy,
                                                 const SkIRect& srcRect,
                                                 GrColorType surfaceColorType,
                                                 GrColorType dstColorType,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 size_t dstOffset) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);
    this->closeActiveOpsTask();

    GrRenderTask* task = this->appendTask(sk_make_sp<GrTransferFromRenderTask>(
            srcProxy, srcRect, surfaceColorType, dstColorType,
            std::move(dstBuffer), dstOffset));

    const GrCaps& caps = *fContext->priv().caps();

    // We always say skgpu::Mipmapped::kNo here since we are always just copying from the base
    // layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(
            this, srcProxy.get(), skgpu::Mipmapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

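// Records a GPU buffer-to-buffer copy of 'size' bytes from 'src' (a transfer buffer) into 'dst'.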
void GrDrawingManager::newBufferTransferTask(sk_sp<GrGpuBuffer> src,
                                             size_t srcOffset,
                                             sk_sp<GrGpuBuffer> dst,
                                             size_t dstOffset,
                                             size_t size) {
    SkASSERT(src);
    SkASSERT(dst);
    SkASSERT(srcOffset + size <= src->size());
    SkASSERT(dstOffset + size <= dst->size());
    SkASSERT(src->intendedType() == GrGpuBufferType::kXferCpuToGpu);
    SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);

    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrBufferTransferRenderTask::Make(std::move(src),
                                                                srcOffset,
                                                                std::move(dst),
                                                                dstOffset,
                                                                size);
    SkASSERT(task);

    this->appendTask(task);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

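// Records an update of 'dst' at 'dstOffset' with the CPU-side contents of 'src'.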
void GrDrawingManager::newBufferUpdateTask(sk_sp<SkData> src,
                                           sk_sp<GrGpuBuffer> dst,
                                           size_t dstOffset) {
    SkASSERT(src);
    SkASSERT(dst);
    SkASSERT(dstOffset + src->size() <= dst->size());
    SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);
    SkASSERT(!dst->isMapped());

    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrBufferUpdateRenderTask::Make(std::move(src),
                                                              std::move(dst),
                                                              dstOffset);
    SkASSERT(task);

    this->appendTask(task);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

sk_sp<GrRenderTask> GrDrawingManager::newCopyRenderTask(sk_sp<GrSurfaceProxy> dst,
                                                        SkIRect dstRect,
                                                        const sk_sp<GrSurfaceProxy>& src,
                                                        SkIRect srcRect,
                                                        GrSamplerState::Filter filter,
                                                        GrSurfaceOrigin origin) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    // It'd be nicer to check this in GrCopyRenderTask::Make. This gets complicated because of
    // "active ops task" tracking. dst will be the target of our copy task but it might also be
    // the target of the active ops task. We currently require the active ops task to be closed
    // before making a new task that targets the same proxy. However, if we first close the
    // active ops task, then fail to make a copy task, the next active ops task may target the
    // same proxy. This will trip an assert related to unnecessary ops task splitting.
    if (src->framebufferOnly()) {
        return nullptr;
    }

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrCopyRenderTask::Make(this,
                                                      std::move(dst),
                                                      dstRect,
                                                      src,
                                                      srcRect,
                                                      filter,
                                                      origin);
    if (!task) {
        return nullptr;
    }

    this->appendTask(task);

    const GrCaps& caps = *fContext->priv().caps();
    // We always say skgpu::Mipmapped::kNo here since we are always just copying from the base
    // layer to another base layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(
            this, src.get(), skgpu::Mipmapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return task;
}

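// Records a pixel upload into 'dst'. On backends that prefer flushes over additional VRAM use
// (e.g. ANGLE), the DAG is flushed first.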
bool GrDrawingManager::newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                                          SkIRect rect,
                                          GrColorType srcColorType,
                                          GrColorType dstColorType,
                                          const GrMipLevel levels[],
                                          int levelCount) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();
    const GrCaps& caps = *fContext->priv().caps();

    // On platforms that prefer flushes over VRAM use (i.e., ANGLE) we're better off forcing a
    // complete flush here.
    if (!caps.preferVRAMUseOverFlushes()) {
        this->flushSurfaces(SkSpan<GrSurfaceProxy*>{},
                            SkSurfaces::BackendSurfaceAccess::kNoAccess,
                            GrFlushInfo{},
                            nullptr);
    }

    GrRenderTask* task = this->appendTask(GrWritePixelsTask::Make(this,
                                                                  std::move(dst),
                                                                  rect,
                                                                  srcColorType,
                                                                  dstColorType,
                                                                  levels,
                                                                  levelCount));
    if (!task) {
        return false;
    }

    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return true;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
PathRenderer* GrDrawingManager::getPathRenderer(const PathRenderer::CanDrawPathArgs& args,
                                                bool allowSW,
                                                PathRendererChain::DrawType drawType,
                                                PathRenderer::StencilSupport* stencilSupport) {
    if (!fPathRendererChain) {
        fPathRendererChain =
                std::make_unique<PathRendererChain>(fContext, fOptionsForPathRendererChain);
    }

    auto pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (PathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

#if GR_PATH_RENDERER_SPEW
    if (pr) {
        SkDebugf("getPathRenderer: %s\n", pr->name());
    }
#endif

    return pr;
}

PathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(new skgpu::ganesh::SoftwarePathRenderer(
                fContext->priv().proxyProvider(),
                fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

skgpu::ganesh::AtlasPathRenderer* GrDrawingManager::getAtlasPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getAtlasPathRenderer();
}

PathRenderer* GrDrawingManager::getTessellationPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getTessellationPathRenderer();
}

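// Flushes and submits pending work when the resource cache reports that it needs a flush in
// order to free up purgeable resources.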
void GrDrawingManager::flushIfNecessary() {
    auto direct = fContext->asDirectContext();
    if (!direct) {
        return;
    }

    auto resourceCache = direct->priv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        if (this->flush({}, SkSurfaces::BackendSurfaceAccess::kNoAccess, GrFlushInfo(), nullptr)) {
            this->submitToGpu(GrSyncCpu::kNo);
        }
        resourceCache->purgeAsNeeded();
    }
}