42#include <unordered_map>
// Describes a fixed-width field packed into a 64-bit sort/uniform key.
// `Bits` is the field width and `Offset` its bit position from the LSB.
// get() and set() are exact inverses for any value that fits in `Bits`.
template <uint64_t Bits, uint64_t Offset>
struct Bitfield {
    static constexpr uint64_t kMask   = ((uint64_t) 1 << Bits) - 1;
    static constexpr uint64_t kOffset = Offset;
    static constexpr uint64_t kBits   = Bits;

    // Extracts this field's value from a packed 64-bit key.
    static uint32_t get(uint64_t v) { return static_cast<uint32_t>((v >> kOffset) & kMask); }
    // Packs a value into this field's position; bits outside the field width are dropped.
    static uint64_t set(uint32_t v) { return (v & kMask) << kOffset; }
};
62template <
typename T,
typename V = T,
typename C = V>
65 using Index = uint32_t;
71 bool empty()
const {
return fIndexToData.empty(); }
72 size_t size()
const {
return fIndexToData.size(); }
74 Index insert(
const T& data) {
75 Index* index = fDataToIndex.find(data);
78 index = fDataToIndex.set(data, (Index) fIndexToData.size());
79 fIndexToData.push_back(
C{
data});
84 const V& lookup(Index index) {
86 return fIndexToData[index];
89 SkSpan<V> data() {
return {fIndexToData.data(), fIndexToData.size()}; }
91 TArray<V>&& detach() {
return std::move(fIndexToData); }
106 CpuOrGpuData(
const UniformDataBlock* cpuData) :
fCpuData(cpuData) {}
111struct TextureBinding {
115 bool operator==(
const TextureBinding& other)
const {
119 bool operator!=(
const TextureBinding& other)
const {
return !(*
this == other); }
121 int numTextures()
const {
127using UniformCache = DenseBiMap<const UniformDataBlock*, CpuOrGpuData>;
128using TextureBindingCache = DenseBiMap<TextureBinding>;
129using GraphicsPipelineCache = DenseBiMap<GraphicsPipelineDesc>;
134class TextureBindingTracker {
136 TextureBindingCache::Index trackTextures(
const TextureDataBlock* paintTextures,
137 const TextureDataBlock* stepTextures) {
138 if (!paintTextures && !stepTextures) {
139 return TextureBindingCache::kInvalidIndex;
141 return fBindingCache.insert({paintTextures, stepTextures});
144 bool setCurrentTextureBindings(TextureBindingCache::Index bindingIndex) {
145 if (bindingIndex < TextureBindingCache::kInvalidIndex && fLastIndex != bindingIndex) {
146 fLastIndex = bindingIndex;
153 void bindTextures(DrawPassCommands::List* commandList) {
154 SkASSERT(fLastIndex < TextureBindingCache::kInvalidIndex);
155 const TextureBinding& binding = fBindingCache.lookup(fLastIndex);
157 auto [texIndices, samplerIndices] =
158 commandList->bindDeferredTexturesAndSamplers(binding.numTextures());
160 if (binding.fPaintTextures) {
161 for (
int i = 0; i < binding.fPaintTextures->numTextures(); ++i) {
162 auto [tex, sampler] = binding.fPaintTextures->texture(i);
163 *texIndices++ = fProxyCache.insert(tex.get());
164 *samplerIndices++ = fSamplerCache.insert(sampler);
167 if (binding.fStepTextures) {
168 for (
int i = 0; i < binding.fStepTextures->numTextures(); ++i) {
169 auto [tex, sampler] = binding.fStepTextures->texture(i);
170 *texIndices++ = fProxyCache.insert(tex.get());
171 *samplerIndices++ = fSamplerCache.insert(sampler);
184 using TextureProxyCache = DenseBiMap<const TextureProxy*, sk_sp<TextureProxy>, ProxyRef>;
185 using SamplerDescCache = DenseBiMap<SamplerDesc>;
187 TextureBindingCache fBindingCache;
189 TextureProxyCache fProxyCache;
190 SamplerDescCache fSamplerCache;
192 TextureBindingCache::Index fLastIndex = TextureBindingCache::kInvalidIndex;
197class UniformTracker {
199 UniformTracker(
bool useStorageBuffers) : fUseStorageBuffers(useStorageBuffers) {}
203 UniformCache::Index trackUniforms(GraphicsPipelineCache::Index pipelineIndex,
204 const UniformDataBlock* cpuData) {
206 return UniformCache::kInvalidIndex;
209 if (pipelineIndex >=
SkToU32(fPerPipelineCaches.size())) {
210 fPerPipelineCaches.resize(pipelineIndex + 1);
213 return fPerPipelineCaches[pipelineIndex].insert(cpuData);
220 bool writeUniforms(DrawBufferManager* bufferMgr) {
221 for (UniformCache& cache : fPerPipelineCaches) {
227 size_t udbSize =
cache.lookup(0).fCpuData->size();
228 size_t udbDataSize = udbSize;
229 if (!fUseStorageBuffers) {
230 udbSize = bufferMgr->alignUniformBlockSize(udbSize);
232 auto [writer, bufferInfo] =
233 fUseStorageBuffers ? bufferMgr->getSsboWriter(udbSize *
cache.size())
234 : bufferMgr->getUniformWriter(udbSize *
cache.size());
239 uint32_t bindingSize;
240 if (fUseStorageBuffers) {
242 bindingSize =
static_cast<uint32_t
>(udbSize *
cache.size());
246 bindingSize =
static_cast<uint32_t
>(udbSize);
249 for (CpuOrGpuData& dataBlock :
cache.
data()) {
250 SkASSERT(dataBlock.fCpuData->size() == udbDataSize);
251 writer.write(dataBlock.fCpuData->data(), udbDataSize);
253 dataBlock.fGpuData.fBuffer = bufferInfo.fBuffer;
254 dataBlock.fGpuData.fOffset = bufferInfo.fOffset;
255 dataBlock.fGpuData.fBindingSize = bindingSize;
257 if (!fUseStorageBuffers) {
258 bufferInfo.fOffset += bindingSize;
259 writer.skipBytes(bindingSize - udbDataSize);
269 bool setCurrentUniforms(GraphicsPipelineCache::Index pipelineIndex,
270 UniformCache::Index uniformIndex) {
271 if (uniformIndex >= UniformCache::kInvalidIndex) {
275 uniformIndex < fPerPipelineCaches[pipelineIndex].size());
277 if (fUseStorageBuffers) {
280 if (fLastPipeline != pipelineIndex || fLastIndex != uniformIndex) {
281 fLastPipeline = pipelineIndex;
282 fLastIndex = uniformIndex;
291 void bindUniforms(
UniformSlot slot, DrawPassCommands::List* commandList) {
292 SkASSERT(fLastPipeline < GraphicsPipelineCache::kInvalidIndex &&
293 fLastIndex < UniformCache::kInvalidIndex);
294 SkASSERT(!fUseStorageBuffers || fLastIndex == 0);
295 const BindUniformBufferInfo& binding =
296 fPerPipelineCaches[fLastPipeline].lookup(fLastIndex).fGpuData;
297 commandList->bindUniformBuffer(binding, slot);
306 const bool fUseStorageBuffers;
308 GraphicsPipelineCache::Index fLastPipeline = GraphicsPipelineCache::kInvalidIndex;
309 UniformCache::Index fLastIndex = UniformCache::kInvalidIndex;
344 UniformCache::Index geomUniformIndex,
347 : fPipelineKey(ColorDepthOrderField::set(
draw->fDrawParams.order().paintOrder().bits()) |
348 StencilIndexField::set(
draw->fDrawParams.order().stencilIndex().bits()) |
349 RenderStepField::set(static_cast<uint32_t>(
renderStep)) |
351 , fUniformKey(GeometryUniformField::set(geomUniformIndex) |
356 SkASSERT(renderStep <= draw->fRenderer->numRenderSteps());
360 return fPipelineKey < k.fPipelineKey ||
361 (fPipelineKey == k.fPipelineKey && fUniformKey < k.fUniformKey);
365 return fDraw->
fRenderer->
step(RenderStepField::get(fPipelineKey));
371 return PipelineField::get(fPipelineKey);
374 return GeometryUniformField::get(fUniformKey);
377 return ShadingUniformField::get(fUniformKey);
380 return TextureBindingsField::get(fUniformKey);
387 using ColorDepthOrderField = Bitfield<16, 48>;
388 using StencilIndexField = Bitfield<16, 32>;
389 using RenderStepField = Bitfield<2, 30>;
390 using PipelineField = Bitfield<30, 0>;
391 uint64_t fPipelineKey;
396 using GeometryUniformField = Bitfield<17, 47>;
397 using ShadingUniformField = Bitfield<17, 30>;
398 using TextureBindingsField = Bitfield<30, 0>;
399 uint64_t fUniformKey;
427 SKGPU_LOG_W(
"Failed to create destination copy texture for dst read.");
434 SKGPU_LOG_W(
"Failed to create destination copy task for dst read.");
438 recorder->
priv().
add(std::move(copyTask));
443 std::pair<LoadOp, StoreOp>
ops,
444 std::array<float, 4> clearColor)
448 , fClearColor(clearColor) {}
450DrawPass::~DrawPass() =
default;
452std::unique_ptr<DrawPass> DrawPass::Make(
Recorder* recorder,
453 std::unique_ptr<DrawList> draws,
456 std::pair<LoadOp, StoreOp>
ops,
457 std::array<float, 4> clearColor) {
481 Rect passBounds = Rect::InfiniteInverted();
489 SKGPU_LOG_W(
"Buffer mapping has already failed; dropping draw pass!");
493 GraphicsPipelineCache pipelineCache;
502 UniformTracker geometryUniformTracker(useStorageBuffers);
503 UniformTracker shadingUniformTracker(useStorageBuffers);
504 TextureBindingTracker textureBindingTracker;
516 if (!draws->dstCopyBounds().isEmptyNegativeOrNaN()) {
519 SkIRect dstCopyPixelBounds = draws->dstCopyBounds().makeRoundOut().asSkIRect();
520 dstOffset = dstCopyPixelBounds.
topLeft();
524 SKGPU_LOG_W(
"Failed to copy destination for reading. Dropping draw pass!");
529 std::vector<SortKey> keys;
530 keys.reserve(draws->renderStepCount());
539 if (
draw.fPaintParams.has_value()) {
541 draw.fPaintParams->dstReadRequirement() == DstReadRequirement::kTextureCopy
544 std::tie(shaderID, shadingUniforms, paintTextures) =
549 draw.fDrawParams.transform(),
550 draw.fPaintParams.value(),
556 for (
int stepIndex = 0; stepIndex <
draw.fRenderer->numRenderSteps(); ++stepIndex) {
560 GraphicsPipelineCache::Index pipelineIndex = pipelineCache.insert(
561 {
step, performsShading ? shaderID : UniquePaintParamsID::InvalidID()});
569 UniformCache::Index geomUniformIndex = geometryUniformTracker.trackUniforms(
570 pipelineIndex, geometryUniforms);
571 UniformCache::Index shadingUniformIndex = shadingUniformTracker.trackUniforms(
572 pipelineIndex, performsShading ? shadingUniforms :
nullptr);
573 TextureBindingCache::Index textureIndex = textureBindingTracker.trackTextures(
574 performsShading ? paintTextures :
nullptr, stepTextures);
576 keys.push_back({&
draw, stepIndex, pipelineIndex,
577 geomUniformIndex, shadingUniformIndex, textureIndex});
580 passBounds.
join(
draw.fDrawParams.clip().drawBounds());
581 drawPass->fDepthStencilFlags |=
draw.fRenderer->depthStencilFlags();
582 drawPass->fRequiresMSAA |=
draw.fRenderer->requiresMSAA();
585 if (!geometryUniformTracker.writeUniforms(bufferMgr) ||
586 !shadingUniformTracker.writeUniforms(bufferMgr)) {
599 std::sort(keys.begin(), keys.end());
602 DrawWriter drawWriter(&drawPass->fCommandList, bufferMgr);
603 GraphicsPipelineCache::Index lastPipeline = GraphicsPipelineCache::kInvalidIndex;
606 SkASSERT(drawPass->fTarget->isFullyLazy() ||
608 drawPass->fCommandList.setScissor(lastScissor);
614 const bool pipelineChange =
key.pipelineIndex() != lastPipeline;
616 const bool geomBindingChange = geometryUniformTracker.setCurrentUniforms(
617 key.pipelineIndex(),
key.geometryUniformIndex());
618 const bool shadingBindingChange = shadingUniformTracker.setCurrentUniforms(
619 key.pipelineIndex(),
key.shadingUniformIndex());
620 const bool textureBindingsChange = textureBindingTracker.setCurrentTextureBindings(
621 key.textureBindingIndex());
622 const SkIRect* newScissor =
draw.fDrawParams.clip().scissor() != lastScissor ?
623 &
draw.fDrawParams.clip().scissor() :
nullptr;
625 const bool stateChange = geomBindingChange ||
626 shadingBindingChange ||
627 textureBindingsChange ||
632 if (pipelineChange) {
636 }
else if (stateChange) {
641 if (pipelineChange) {
642 drawPass->fCommandList.bindGraphicsPipeline(
key.pipelineIndex());
643 lastPipeline =
key.pipelineIndex();
646 if (geomBindingChange) {
647 geometryUniformTracker.bindUniforms(UniformSlot::kRenderStep,
648 &drawPass->fCommandList);
650 if (shadingBindingChange) {
651 shadingUniformTracker.bindUniforms(UniformSlot::kPaint, &drawPass->fCommandList);
653 if (textureBindingsChange) {
654 textureBindingTracker.bindTextures(&drawPass->fCommandList);
657 drawPass->fCommandList.setScissor(*newScissor);
658 lastScissor = *newScissor;
662 UniformCache::Index geometrySsboIndex =
663 (
key.geometryUniformIndex() == UniformCache::kInvalidIndex)
665 :
key.geometryUniformIndex();
666 UniformCache::Index shadingSsboIndex =
667 (
key.shadingUniformIndex() == UniformCache::kInvalidIndex)
669 :
key.shadingUniformIndex();
674 SKGPU_LOG_W(
"Failed to write necessary vertex/instance data for DrawPass, dropping!");
683 drawPass->fPipelineDescs = pipelineCache.detach();
684 drawPass->fSamplerDescs = textureBindingTracker.detachSamplers();
685 drawPass->fSampledTextures = textureBindingTracker.detachTextures();
687 TRACE_COUNTER1(
"skia.gpu",
"# pipelines", drawPass->fPipelineDescs.size());
688 TRACE_COUNTER1(
"skia.gpu",
"# textures", drawPass->fSampledTextures.size());
689 TRACE_COUNTER1(
"skia.gpu",
"# commands", drawPass->fCommandList.count());
699 fFullPipelines.reserve(fFullPipelines.size() + fPipelineDescs.size());
705 SKGPU_LOG_W(
"Failed to create GraphicsPipeline for draw in RenderPass. Dropping pass!");
708 fFullPipelines.push_back(std::move(pipeline));
712 fPipelineDescs.clear();
714 for (
int i = 0; i < fSampledTextures.size(); ++i) {
718 if (!fSampledTextures[i]->textureInfo().isValid()) {
719 SKGPU_LOG_W(
"Failed to validate sampled texture. Will not create renderpass!");
722 if (!TextureProxy::InstantiateIfNotLazy(resourceProvider, fSampledTextures[i].get())) {
723 SKGPU_LOG_W(
"Failed to instantiate sampled texture. Will not create renderpass!");
728 fSamplers.reserve(fSamplers.size() + fSamplerDescs.size());
729 for (
int i = 0; i < fSamplerDescs.size(); ++i) {
732 SKGPU_LOG_W(
"Failed to create sampler. Will not create renderpass!");
735 fSamplers.push_back(std::move(sampler));
739 fSamplerDescs.clear();
745 for (
int i = 0; i < fFullPipelines.size(); ++i) {
748 for (
int i = 0; i < fSampledTextures.size(); ++i) {
751 for (
int i = 0; i < fSamplers.size(); ++i) {
756const Texture* DrawPass::getTexture(
size_t index)
const {
760 return fSampledTextures[index]->texture();
762const Sampler* DrawPass::getSampler(
size_t index)
const {
765 return fSamplers[index].get();
static int step(int x, SkScalar min, SkScalar max)
const TextureProxy * fProxy
static constexpr uint64_t kBits
const TextureDataBlock * fPaintTextures
const UniformDataBlock * fCpuData
BindUniformBufferInfo fGpuData
static constexpr Index kInvalidIndex
static constexpr uint64_t kOffset
const TextureDataBlock * fStepTextures
#define SKGPU_LOG_W(fmt,...)
static constexpr size_t SkAlignTo(size_t x, size_t alignment)
constexpr int SkNextLog2_portable(uint32_t value)
bool operator!=(const sk_sp< T > &a, const sk_sp< U > &b)
sk_sp< T > sk_ref_sp(T *obj)
constexpr size_t SkToSizeT(S x)
constexpr uint16_t SkToU16(S x)
static constexpr bool SkToBool(const T &x)
constexpr uint32_t SkToU32(S x)
#define TRACE_COUNTER1(category_group, name, value)
#define TRACE_EVENT_SCOPE_THREAD
static void draw(SkCanvas *canvas, SkRect &target, int x, int y)
const ResourceBindingRequirements & resourceBindingRequirements() const
bool isTexturable(const TextureInfo &) const
bool storageBufferPreferred() const
void trackResource(sk_sp< Resource > resource)
void trackCommandBufferResource(sk_sp< Resource > resource)
static sk_sp< CopyTextureToTextureTask > Make(sk_sp< TextureProxy > srcProxy, SkIRect srcRect, sk_sp< TextureProxy > dstProxy, SkIPoint dstPoint, int dstLevel=0)
bool hasMappingFailed() const
static constexpr int kMaxRenderSteps
UniformCache::Index shadingUniformIndex() const
UniformCache::Index geometryUniformIndex() const
SortKey(const DrawList::Draw *draw, int renderStep, GraphicsPipelineCache::Index pipelineIndex, UniformCache::Index geomUniformIndex, UniformCache::Index shadingUniformIndex, TextureBindingCache::Index textureBindingIndex)
const DrawList::Draw & draw() const
const RenderStep & renderStep() const
bool operator<(const SortKey &k) const
TextureBindingCache::Index textureBindingIndex() const
GraphicsPipelineCache::Index pipelineIndex() const
void newPipelineState(PrimitiveType type, size_t vertexStride, size_t instanceStride)
const ShaderCodeDictionary * shaderCodeDictionary() const
TextureDataCache * textureDataCache()
const Caps * caps() const
ResourceProvider * resourceProvider()
DrawBufferManager * drawBufferManager()
AI Rect & join(Rect rect)
AI SkIRect asSkIRect() const
size_t vertexStride() const
PrimitiveType primitiveType() const
virtual void writeVertices(DrawWriter *, const DrawParams &, skvx::ushort2 ssboIndices) const =0
size_t instanceStride() const
bool performsShading() const
static constexpr int kMaxRenderSteps
const RenderStep & step(int i) const
sk_sp< GraphicsPipeline > findOrCreateGraphicsPipeline(const RuntimeEffectDictionary *, const GraphicsPipelineDesc &, const RenderPassDesc &)
sk_sp< Sampler > findOrCreateCompatibleSampler(const SamplerDesc &)
static sk_sp< TextureProxy > Make(const Caps *, ResourceProvider *, SkISize dimensions, const TextureInfo &, skgpu::Budgeted)
bool operator==(const FlutterPoint &a, const FlutterPoint &b)
EMSCRIPTEN_KEEPALIVE void empty()
T __attribute__((ext_vector_type(N))) V
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not defaults to or::depending on whether ipv6 is specified vm service A custom Dart VM Service port The default is to pick a randomly available open port disable vm Disable the Dart VM Service The Dart VM Service is never available in release mode disable vm service Disable mDNS Dart VM Service publication Bind to the IPv6 localhost address for the Dart VM Service Ignored if vm service host is set endless trace Enable an endless trace buffer The default is a ring buffer This is useful when very old events need to viewed For during application launch Memory usage will continue to grow indefinitely however Start app with an specific route defined on the framework flutter assets Path to the Flutter assets directory enable service port Allow the VM service to fallback to automatic port selection if binding to a specified port fails trace Trace early application lifecycle Automatically switches to an endless trace buffer trace skia Filters out all Skia trace event categories except those that are specified in this comma separated list dump skp on shader Automatically dump the skp that triggers new shader compilations This is useful for writing custom ShaderWarmUp to reduce jank By this is not enabled to reduce the overhead purge persistent cache
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot data
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not set
MonotonicValue< DisjointStencilIndexSequence > DisjointStencilIndex
std::tuple< UniquePaintParamsID, const UniformDataBlock *, const TextureDataBlock * > ExtractPaintData(Recorder *recorder, PipelineDataGatherer *gatherer, PaintParamsKeyBuilder *builder, const Layout layout, const SkM44 &local2Dev, const PaintParams &p, sk_sp< TextureProxy > dstTexture, SkIPoint dstOffset, const SkColorInfo &targetColorInfo)
sk_sp< TextureProxy > add_copy_target_task(Recorder *recorder, sk_sp< TextureProxy > target, const SkImageInfo &targetInfo, const SkIPoint &targetOffset)
std::tuple< const UniformDataBlock *, const TextureDataBlock * > ExtractRenderStepData(UniformDataCache *uniformDataCache, TextureDataCache *textureDataCache, PipelineDataGatherer *gatherer, const Layout layout, const RenderStep *step, const DrawParams ¶ms)
MonotonicValue< CompressedPaintersOrderSequence > CompressedPaintersOrder
constexpr SkISize size() const
static constexpr SkIRect MakeSize(const SkISize &size)
constexpr SkIPoint topLeft() const
static constexpr SkIRect MakePtSize(SkIPoint pt, SkISize size)
bool contains(int32_t x, int32_t y) const
const SkColorInfo & colorInfo() const
SkImageInfo makeDimensions(SkISize newSize) const
SkISize dimensions() const
const Renderer * fRenderer
Layout fUniformBufferLayout
Layout fStorageBufferLayout
#define TRACE_EVENT0(category_group, name)
#define TRACE_EVENT_INSTANT0(category_group, name)
#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val)