using IntrinsicConstant = float[4];

constexpr int kBufferBindingOffsetAlignment = 256;

constexpr int kIntrinsicConstantAlignedSize =
        SkAlignTo(sizeof(IntrinsicConstant), kBufferBindingOffsetAlignment);

#if defined(__EMSCRIPTEN__)
constexpr int kNumSlotsForIntrinsicConstantBuffer = 8;
#endif
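// Factory for DawnCommandBuffer: construction is separated from setNewCommandBufferResources()
// so that a failure to create the Dawn command encoder yields a null command buffer.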
    std::unique_ptr<DawnCommandBuffer> cmdBuffer(
            new DawnCommandBuffer(sharedContext, resourceProvider));
    if (!cmdBuffer->setNewCommandBufferResources()) {
        return {};
    }
DawnCommandBuffer::DawnCommandBuffer(const DawnSharedContext* sharedContext,
                                     DawnResourceProvider* resourceProvider)
        : fSharedContext(sharedContext)
        , fResourceProvider(resourceProvider) {}
wgpu::CommandBuffer DawnCommandBuffer::finishEncoding() {
    wgpu::CommandBuffer cmdBuffer = fCommandEncoder.Finish();
    fCommandEncoder = nullptr;
    return cmdBuffer;
}
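// Drops all per-recording state (encoders, pipelines, bound uniform buffers) so this command
// buffer can be reused for a fresh recording.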
void DawnCommandBuffer::onResetCommandBuffer() {
    fIntrinsicConstantBuffer = nullptr;
    fActiveGraphicsPipeline = nullptr;
    fActiveRenderPassEncoder = nullptr;
    fActiveComputePassEncoder = nullptr;
    fCommandEncoder = nullptr;

    for (auto& bufferSlot : fBoundUniformBuffers) {
        bufferSlot = nullptr;
    }
    fBoundUniformBuffersDirty = true;
}
bool DawnCommandBuffer::setNewCommandBufferResources() {
    fCommandEncoder = fSharedContext->device().CreateCommandEncoder();
    return true;
}
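// Top-level render pass entry point: writes the intrinsic (viewport) constants, begins the Dawn
// render pass, replays each DrawPass in order, and ends the pass. Any failure aborts the pass.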
bool DawnCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc,
                                        const Texture* colorTexture,
                                        const Texture* resolveTexture,
                                        const Texture* depthStencilTexture,
                                        SkRect viewport,
                                        const DrawPassList& drawPasses) {
    this->preprocessViewport(viewport);

    if (!this->beginRenderPass(renderPassDesc, colorTexture, resolveTexture, depthStencilTexture)) {
        return false;
    }

    this->setViewport(viewport);

    for (const auto& drawPass : drawPasses) {
        if (!this->addDrawPass(drawPass.get())) SK_UNLIKELY {
            this->endRenderPass();
            return false;
        }
    }

    this->endRenderPass();
    return true;
}
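// Top-level compute pass entry point: each dispatch binds its pipeline and resources, then runs
// either a direct dispatch (WorkgroupSize) or an indirect dispatch driven by a BufferView.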
bool DawnCommandBuffer::onAddComputePass(DispatchGroupSpan groups) {
    this->beginComputePass();
    for (const auto& group : groups) {
        group->addResourceRefs(this);
        for (const auto& dispatch : group->dispatches()) {
            this->bindComputePipeline(group->getPipeline(dispatch.fPipelineIndex));
            this->bindDispatchResources(*group, dispatch);
            if (const WorkgroupSize* globalSize =
                        std::get_if<WorkgroupSize>(&dispatch.fGlobalSizeOrIndirect)) {
                this->dispatchWorkgroups(*globalSize);
            } else {
                SkASSERT(std::holds_alternative<BufferView>(dispatch.fGlobalSizeOrIndirect));
                const BufferView& indirect =
                        *std::get_if<BufferView>(&dispatch.fGlobalSizeOrIndirect);
                this->dispatchWorkgroupsIndirect(indirect.fInfo.fBuffer, indirect.fInfo.fOffset);
            }
        }
    }
    this->endComputePass();
    return true;
}
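// Translates a graphite RenderPassDesc plus the attachment textures into a
// wgpu::RenderPassDescriptor and begins the render pass encoder.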
bool DawnCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc,
                                        const Texture* colorTexture,
                                        const Texture* resolveTexture,
                                        const Texture* depthStencilTexture) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    constexpr static wgpu::StoreOp wgpuStoreActionMap[]{wgpu::StoreOp::Store,
                                                        wgpu::StoreOp::Discard};

    wgpu::RenderPassDescriptor wgpuRenderPass = {};
    wgpu::RenderPassColorAttachment wgpuColorAttachment;
    wgpu::RenderPassDepthStencilAttachment wgpuDepthStencilAttachment;
#if !defined(__EMSCRIPTEN__)
    wgpu::DawnRenderPassColorAttachmentRenderToSingleSampled mssaRenderToSingleSampledDesc;
#endif
    auto& colorInfo = renderPassDesc.fColorAttachment;
    bool loadMSAAFromResolveExplicitly = false;

    wgpuRenderPass.colorAttachments = &wgpuColorAttachment;
    wgpuRenderPass.colorAttachmentCount = 1;

    const auto* dawnColorTexture = static_cast<const DawnTexture*>(colorTexture);
    SkASSERT(dawnColorTexture->renderTextureView());
    wgpuColorAttachment.view = dawnColorTexture->renderTextureView();

    const std::array<float, 4>& clearColor = renderPassDesc.fClearColor;
    wgpuColorAttachment.clearValue = {
            clearColor[0], clearColor[1], clearColor[2], clearColor[3]};
    wgpuColorAttachment.loadOp = wgpuLoadActionMap[static_cast<int>(colorInfo.fLoadOp)];
    wgpuColorAttachment.storeOp = wgpuStoreActionMap[static_cast<int>(colorInfo.fStoreOp)];
    if (resolveTexture) {
        const auto* dawnResolveTexture = static_cast<const DawnTexture*>(resolveTexture);
        SkASSERT(dawnResolveTexture->renderTextureView());
        wgpuColorAttachment.resolveTarget = dawnResolveTexture->renderTextureView();

        // The MSAA attachment is resolved at the end of the pass, so its own contents are
        // expected to be discarded.
        SkASSERT(wgpuColorAttachment.storeOp == wgpu::StoreOp::Discard);

        if (renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad) {
            std::optional<wgpu::LoadOp> resolveLoadOp =
                    fSharedContext->dawnCaps()->resolveTextureLoadOp();
            if (resolveLoadOp.has_value()) {
                wgpuColorAttachment.loadOp = *resolveLoadOp;
            } else {
                // Fall back to loading the resolve texture's contents with an explicit draw.
                loadMSAAFromResolveExplicitly = true;
            }
        }
    }
    [[maybe_unused]] bool isMSAAToSingleSampled = renderPassDesc.fSampleCount > 1 &&
                                                  colorTexture->numSamples() == 1;
#if defined(__EMSCRIPTEN__)
    SkASSERT(!isMSAAToSingleSampled);
#else
    if (isMSAAToSingleSampled) {
        // The render pass is multisampled but the color attachment is single sampled, so Dawn's
        // MSAARenderToSingleSampled feature is required.
        SkASSERT(fSharedContext->device().HasFeature(
                wgpu::FeatureName::MSAARenderToSingleSampled));

        wgpuColorAttachment.nextInChain = &mssaRenderToSingleSampledDesc;
        mssaRenderToSingleSampledDesc.implicitSampleCount = renderPassDesc.fSampleCount;
    }
#endif
    auto& depthStencilInfo = renderPassDesc.fDepthStencilAttachment;
    if (depthStencilTexture) {
        const auto* dawnDepthStencilTexture = static_cast<const DawnTexture*>(depthStencilTexture);
        auto format = dawnDepthStencilTexture->textureInfo().dawnTextureSpec().getViewFormat();
        SkASSERT(DawnFormatIsDepthOrStencil(format));

        SkASSERT(dawnDepthStencilTexture->renderTextureView());
        wgpuDepthStencilAttachment.view = dawnDepthStencilTexture->renderTextureView();

        if (DawnFormatIsDepth(format)) {
            wgpuDepthStencilAttachment.depthClearValue = renderPassDesc.fClearDepth;
            wgpuDepthStencilAttachment.depthLoadOp =
                    wgpuLoadActionMap[static_cast<int>(depthStencilInfo.fLoadOp)];
            wgpuDepthStencilAttachment.depthStoreOp =
                    wgpuStoreActionMap[static_cast<int>(depthStencilInfo.fStoreOp)];
        }

        if (DawnFormatIsStencil(format)) {
            wgpuDepthStencilAttachment.stencilClearValue = renderPassDesc.fClearStencil;
            wgpuDepthStencilAttachment.stencilLoadOp =
                    wgpuLoadActionMap[static_cast<int>(depthStencilInfo.fLoadOp)];
            wgpuDepthStencilAttachment.stencilStoreOp =
                    wgpuStoreActionMap[static_cast<int>(depthStencilInfo.fStoreOp)];
        }

        wgpuRenderPass.depthStencilAttachment = &wgpuDepthStencilAttachment;
    } else {
        SkASSERT(!depthStencilInfo.fTextureInfo.isValid());
    }
    if (loadMSAAFromResolveExplicitly) {
        if (!this->loadMSAAFromResolveAndBeginRenderPassEncoder(
                    renderPassDesc,
                    wgpuRenderPass,
                    static_cast<const DawnTexture*>(colorTexture))) {
            return false;
        }
    } else {
        fActiveRenderPassEncoder = fCommandEncoder.BeginRenderPass(&wgpuRenderPass);
    }

    return true;
}
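// When the resolve texture must be loaded but Dawn cannot express that load op directly, the
// resolve contents are first blitted into an intermediate texture, and then blitted back into the
// MSAA attachment once the real render pass has begun.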
bool DawnCommandBuffer::loadMSAAFromResolveAndBeginRenderPassEncoder(
        const RenderPassDesc& frontendRenderPassDesc,
        const wgpu::RenderPassDescriptor& wgpuRenderPassDesc,
        const DawnTexture* msaaTexture) {
    SkASSERT(!fActiveRenderPassEncoder);

    sk_sp<DawnTexture> msaaLoadTexture = fResourceProvider->findOrCreateDiscardableMSAALoadTexture(
            msaaTexture->dimensions(), msaaTexture->textureInfo());
    if (!msaaLoadTexture) {
        SKGPU_LOG_E("DawnCommandBuffer::loadMSAAFromResolveAndBeginRenderPassEncoder: "
                    "Can't create MSAA Load Texture.");
        return false;
    }
    RenderPassDesc intermediateRenderPassDesc = {};
    intermediateRenderPassDesc.fColorAttachment.fStoreOp = StoreOp::kStore;
    intermediateRenderPassDesc.fColorAttachment.fTextureInfo =
            frontendRenderPassDesc.fColorResolveAttachment.fTextureInfo;

    wgpu::RenderPassColorAttachment wgpuIntermediateColorAttachment;
    wgpuIntermediateColorAttachment.loadOp = wgpu::LoadOp::Clear;
    wgpuIntermediateColorAttachment.clearValue = {1, 1, 1, 1};
    wgpuIntermediateColorAttachment.storeOp = wgpu::StoreOp::Store;
    wgpuIntermediateColorAttachment.view = msaaLoadTexture->renderTextureView();

    wgpu::RenderPassDescriptor wgpuIntermediateRenderPassDesc;
    wgpuIntermediateRenderPassDesc.colorAttachmentCount = 1;
    wgpuIntermediateRenderPassDesc.colorAttachments = &wgpuIntermediateColorAttachment;
    auto renderPassEncoder = fCommandEncoder.BeginRenderPass(&wgpuIntermediateRenderPassDesc);

    bool blitSucceeded = this->doBlitWithDraw(
            renderPassEncoder,
            intermediateRenderPassDesc,
            wgpuRenderPassDesc.colorAttachments[0].resolveTarget,
            msaaTexture->dimensions().width(),
            msaaTexture->dimensions().height());

    renderPassEncoder.End();

    if (!blitSucceeded) {
        return false;
    }

    // Start the actual render pass and blit the intermediate texture into the MSAA attachment.
    renderPassEncoder = fCommandEncoder.BeginRenderPass(&wgpuRenderPassDesc);

    if (!this->doBlitWithDraw(renderPassEncoder,
                              frontendRenderPassDesc,
                              msaaLoadTexture->renderTextureView(),
                              msaaTexture->dimensions().width(),
                              msaaTexture->dimensions().height())) {
        renderPassEncoder.End();
        return false;
    }

    fActiveRenderPassEncoder = renderPassEncoder;
    return true;
}
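// Draws a fullscreen triangle that samples sourceTextureView, using a "blit with draw" pipeline
// keyed off the render pass description.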
bool DawnCommandBuffer::doBlitWithDraw(const wgpu::RenderPassEncoder& renderEncoder,
                                       const RenderPassDesc& frontendRenderPassDesc,
                                       const wgpu::TextureView& sourceTextureView,
                                       int width,
                                       int height) {
    auto loadPipeline = fResourceProvider->findOrCreateBlitWithDrawPipeline(frontendRenderPassDesc);
    if (!loadPipeline) {
        SKGPU_LOG_E("Unable to create pipeline to blit with draw");
        return false;
    }

    renderEncoder.SetPipeline(loadPipeline);

    // The blit pipeline uses a single texture binding and no sampler.
    wgpu::BindGroupEntry entry;
    entry.binding = 0;
    entry.textureView = sourceTextureView;

    wgpu::BindGroupDescriptor desc;
    desc.layout = loadPipeline.GetBindGroupLayout(0);
    desc.entryCount = 1;
    desc.entries = &entry;

    auto bindGroup = fSharedContext->device().CreateBindGroup(&desc);

    renderEncoder.SetBindGroup(0, bindGroup);

    renderEncoder.SetViewport(0, 0, width, height, 0, 1);

    // Fullscreen triangle.
    renderEncoder.Draw(3);

    return true;
}
void DawnCommandBuffer::endRenderPass() {
    fActiveRenderPassEncoder.End();
    fActiveRenderPassEncoder = nullptr;
}
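// Replays one DrawPass: every recorded command is decoded from its tagged pointer and forwarded
// to the matching bind/set/draw method on this command buffer.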
bool DawnCommandBuffer::addDrawPass(const DrawPass* drawPass) {
    drawPass->addResourceRefs(this);
    for (auto [type, cmdPtr] : drawPass->commands()) {
        switch (type) {
            case DrawPassCommands::Type::kBindGraphicsPipeline: {
                auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr);
                if (!this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex))) {
                    return false;
                }
                break;
            }
            case DrawPassCommands::Type::kSetBlendConstants: {
                auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr);
                this->setBlendConstants(sbc->fBlendConstants);
                break;
            }
            case DrawPassCommands::Type::kBindUniformBuffer: {
                auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr);
                this->bindUniformBuffer(bub->fInfo, bub->fSlot);
                break;
            }
            case DrawPassCommands::Type::kBindDrawBuffers: {
                auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr);
                this->bindDrawBuffers(
                        bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect);
                break;
            }
            case DrawPassCommands::Type::kBindTexturesAndSamplers: {
                auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr);
                bindTextureAndSamplers(*drawPass, *bts);
                break;
            }
            case DrawPassCommands::Type::kSetScissor: {
                auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr);
                const SkIRect& rect = ss->fScissor;
                this->setScissor(rect.x(), rect.y(), rect.width(), rect.height());
                break;
            }
            case DrawPassCommands::Type::kDraw: {
                auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr);
                this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexed: {
                auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr);
                this->drawIndexed(
                        draw->fType, draw->fBaseIndex, draw->fIndexCount, draw->fBaseVertex);
                break;
            }
            case DrawPassCommands::Type::kDrawInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr);
                this->drawInstanced(draw->fType,
                                    draw->fBaseVertex,
                                    draw->fVertexCount,
                                    draw->fBaseInstance,
                                    draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr);
                this->drawIndexedInstanced(draw->fType,
                                           draw->fBaseIndex,
                                           draw->fIndexCount,
                                           draw->fBaseVertex,
                                           draw->fBaseInstance,
                                           draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr);
                this->drawIndirect(draw->fType);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr);
                this->drawIndexedIndirect(draw->fType);
                break;
            }
        }
    }

    return true;
}
bool DawnCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) {
    auto* dawnGraphicsPipeline = static_cast<const DawnGraphicsPipeline*>(graphicsPipeline);
    auto& wgpuPipeline = dawnGraphicsPipeline->dawnRenderPipeline();
    if (!wgpuPipeline) {
        return false;
    }
    fActiveGraphicsPipeline = dawnGraphicsPipeline;
    fActiveRenderPassEncoder.SetPipeline(wgpuPipeline);
    fBoundUniformBuffersDirty = true;
    return true;
}
void DawnCommandBuffer::bindUniformBuffer(const BindUniformBufferInfo& info, UniformSlot slot) {
    auto dawnBuffer = static_cast<const DawnBuffer*>(info.fBuffer);

    unsigned int bufferIndex = 0;
    switch (slot) {
        case UniformSlot::kRenderStep:
            bufferIndex = DawnGraphicsPipeline::kRenderStepUniformBufferIndex;
            break;
        case UniformSlot::kPaint:
            bufferIndex = DawnGraphicsPipeline::kPaintUniformBufferIndex;
            break;
        case UniformSlot::kGradient:
            bufferIndex = DawnGraphicsPipeline::kGradientBufferIndex;
            break;
    }

    fBoundUniformBuffers[bufferIndex] = dawnBuffer;
    fBoundUniformBufferOffsets[bufferIndex] = static_cast<uint32_t>(info.fOffset);
    fBoundUniformBufferSizes[bufferIndex] = info.fBindingSize;

    fBoundUniformBuffersDirty = true;
}
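// Binds the vertex, instance, and index buffers on the render pass encoder; the indirect buffer
// is only remembered here and consumed later by drawIndirect()/drawIndexedIndirect().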
void DawnCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices,
                                        const BindBufferInfo& instances,
                                        const BindBufferInfo& indices,
                                        const BindBufferInfo& indirect) {
    if (vertices.fBuffer) {
        auto dawnBuffer = static_cast<const DawnBuffer*>(vertices.fBuffer)->dawnBuffer();
        fActiveRenderPassEncoder.SetVertexBuffer(
                DawnGraphicsPipeline::kVertexBufferIndex, dawnBuffer, vertices.fOffset);
    }
    if (instances.fBuffer) {
        auto dawnBuffer = static_cast<const DawnBuffer*>(instances.fBuffer)->dawnBuffer();
        fActiveRenderPassEncoder.SetVertexBuffer(
                DawnGraphicsPipeline::kInstanceBufferIndex, dawnBuffer, instances.fOffset);
    }
    if (indices.fBuffer) {
        auto dawnBuffer = static_cast<const DawnBuffer*>(indices.fBuffer)->dawnBuffer();
        fActiveRenderPassEncoder.SetIndexBuffer(
                dawnBuffer, wgpu::IndexFormat::Uint16, indices.fOffset);
    }
    if (indirect.fBuffer) {
        fCurrentIndirectBuffer = static_cast<const DawnBuffer*>(indirect.fBuffer)->dawnBuffer();
        fCurrentIndirectBufferOffset = indirect.fOffset;
    } else {
        fCurrentIndirectBuffer = nullptr;
        fCurrentIndirectBufferOffset = 0;
    }
}
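// Builds (or reuses) the bind group holding the draw's samplers and texture views. The
// single-texture case goes through the resource provider's cache; larger sets build a temporary
// bind group with interleaved sampler/texture entries.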
void DawnCommandBuffer::bindTextureAndSamplers(
        const DrawPass& drawPass, const DrawPassCommands::BindTexturesAndSamplers& command) {
    wgpu::BindGroup bindGroup;
    if (command.fNumTexSamplers == 1) {
        const auto* texture =
                static_cast<const DawnTexture*>(drawPass.getTexture(command.fTextureIndices[0]));
        const auto* sampler =
                static_cast<const DawnSampler*>(drawPass.getSampler(command.fSamplerIndices[0]));
        bindGroup = fResourceProvider->findOrCreateSingleTextureSamplerBindGroup(sampler, texture);
    } else {
        std::vector<wgpu::BindGroupEntry> entries(2 * command.fNumTexSamplers);

        for (int i = 0; i < command.fNumTexSamplers; ++i) {
            const auto* texture = static_cast<const DawnTexture*>(
                    drawPass.getTexture(command.fTextureIndices[i]));
            const auto* sampler = static_cast<const DawnSampler*>(
                    drawPass.getSampler(command.fSamplerIndices[i]));
            auto& wgpuTextureView = texture->sampleTextureView();
            auto& wgpuSampler = sampler->dawnSampler();

            // Each pair of entries is a sampler followed by its texture view.
            entries[2 * i].binding = 2 * i;
            entries[2 * i].sampler = wgpuSampler;

            entries[2 * i + 1].binding = 2 * i + 1;
            entries[2 * i + 1].textureView = wgpuTextureView;
        }

        wgpu::BindGroupDescriptor desc;
        desc.layout = fActiveGraphicsPipeline->dawnGroupLayouts()[
                DawnGraphicsPipeline::kTextureBindGroupIndex];
        desc.entryCount = entries.size();
        desc.entries = entries.data();

        bindGroup = fSharedContext->device().CreateBindGroup(&desc);
    }

    fActiveRenderPassEncoder.SetBindGroup(DawnGraphicsPipeline::kTextureBindGroupIndex, bindGroup);
}
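// Lazily (re)binds the uniform-buffer bind group before a draw. Slot 0 always holds the intrinsic
// constants; the remaining slots are filled only if the active pipeline actually uses
// render-step, paint, or gradient uniforms.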
void DawnCommandBuffer::syncUniformBuffers() {
    if (fBoundUniformBuffersDirty) {
        fBoundUniformBuffersDirty = false;

        std::array<uint32_t, 4> dynamicOffsets;
        std::array<std::pair<const DawnBuffer*, uint32_t>, 4> boundBuffersAndSizes;
        boundBuffersAndSizes[0].first = fIntrinsicConstantBuffer.get();
        boundBuffersAndSizes[0].second = sizeof(IntrinsicConstant);

        int activeIntrinsicBufferSlot = fIntrinsicConstantBufferSlotsUsed - 1;
        dynamicOffsets[0] = activeIntrinsicBufferSlot * kIntrinsicConstantAlignedSize;

        if (fActiveGraphicsPipeline->hasStepUniforms() &&
            fBoundUniformBuffers[DawnGraphicsPipeline::kRenderStepUniformBufferIndex]) {
            boundBuffersAndSizes[1].first = fBoundUniformBuffers[DawnGraphicsPipeline::kRenderStepUniformBufferIndex];
            boundBuffersAndSizes[1].second = fBoundUniformBufferSizes[DawnGraphicsPipeline::kRenderStepUniformBufferIndex];
            dynamicOffsets[1] = fBoundUniformBufferOffsets[DawnGraphicsPipeline::kRenderStepUniformBufferIndex];
        } else {
            boundBuffersAndSizes[1].first = nullptr;
            dynamicOffsets[1] = 0;
        }

        if (fActiveGraphicsPipeline->hasPaintUniforms() &&
            fBoundUniformBuffers[DawnGraphicsPipeline::kPaintUniformBufferIndex]) {
            boundBuffersAndSizes[2].first = fBoundUniformBuffers[DawnGraphicsPipeline::kPaintUniformBufferIndex];
            boundBuffersAndSizes[2].second = fBoundUniformBufferSizes[DawnGraphicsPipeline::kPaintUniformBufferIndex];
            dynamicOffsets[2] = fBoundUniformBufferOffsets[DawnGraphicsPipeline::kPaintUniformBufferIndex];
        } else {
            boundBuffersAndSizes[2].first = nullptr;
            dynamicOffsets[2] = 0;
        }

        if (fActiveGraphicsPipeline->hasGradientBuffer() &&
            fBoundUniformBuffers[DawnGraphicsPipeline::kGradientBufferIndex]) {
            boundBuffersAndSizes[3].first = fBoundUniformBuffers[DawnGraphicsPipeline::kGradientBufferIndex];
            boundBuffersAndSizes[3].second = fBoundUniformBufferSizes[DawnGraphicsPipeline::kGradientBufferIndex];
            dynamicOffsets[3] = fBoundUniformBufferOffsets[DawnGraphicsPipeline::kGradientBufferIndex];
        } else {
            boundBuffersAndSizes[3].first = nullptr;
            dynamicOffsets[3] = 0;
        }

        auto bindGroup =
                fResourceProvider->findOrCreateUniformBuffersBindGroup(boundBuffersAndSizes);

        fActiveRenderPassEncoder.SetBindGroup(DawnGraphicsPipeline::kUniformBufferBindGroupIndex,
                                              bindGroup,
                                              dynamicOffsets.size(),
                                              dynamicOffsets.data());
    }
}
void DawnCommandBuffer::setScissor(unsigned int left,
                                   unsigned int top,
                                   unsigned int width,
                                   unsigned int height) {
    // The scissor rect is offset by the replay translation before being applied.
    SkIRect scissor = SkIRect::MakeXYWH(
            left + fReplayTranslation.x(), top + fReplayTranslation.y(), width, height);
    fActiveRenderPassEncoder.SetScissorRect(
            scissor.x(), scissor.y(), scissor.width(), scissor.height());
}
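// Computes and uploads the per-pass intrinsic constant. rtAdjust encodes the viewport-to-NDC
// transform as a scale/offset per axis: ndc_x = -1 + 2 * (px - x) / viewport.width() and
// ndc_y = 1 - 2 * (py - y) / viewport.height(), where x/y are the replay-translated viewport
// origin.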
void DawnCommandBuffer::preprocessViewport(const SkRect& viewport) {
    const float x = viewport.x() - fReplayTranslation.x();
    const float y = viewport.y() - fReplayTranslation.y();
    const float invTwoW = 2.f / viewport.width();
    const float invTwoH = 2.f / viewport.height();
    const IntrinsicConstant rtAdjust = {invTwoW, -invTwoH, -1.f - x * invTwoW, 1.f + y * invTwoH};
    // WriteBuffer() is not allowed while a render or compute pass is active.
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

#if !defined(__EMSCRIPTEN__)
    if (!fIntrinsicConstantBuffer) {
        fIntrinsicConstantBuffer = fResourceProvider->getOrCreateIntrinsicConstantBuffer();
    }
    SkASSERT(fIntrinsicConstantBuffer->size() == sizeof(IntrinsicConstant));

    fCommandEncoder.WriteBuffer(fIntrinsicConstantBuffer->dawnBuffer(),
                                /*bufferOffset=*/0,
                                reinterpret_cast<const uint8_t*>(rtAdjust),
                                sizeof(rtAdjust));
    fIntrinsicConstantBufferSlotsUsed = 1;
#else
    // wgpu::CommandEncoder::WriteBuffer is unavailable in the WASM build, so rotate through the
    // slots of a queue-written uniform buffer instead.
    if (!fIntrinsicConstantBuffer ||
        fIntrinsicConstantBufferSlotsUsed == kNumSlotsForIntrinsicConstantBuffer) {
        size_t bufferSize = kIntrinsicConstantAlignedSize * kNumSlotsForIntrinsicConstantBuffer;
        fIntrinsicConstantBuffer = fResourceProvider->findOrCreateDawnBuffer(
                bufferSize, BufferType::kUniform, AccessPattern::kGpuOnly,
                "IntrinsicConstantBuffer");
        fIntrinsicConstantBufferSlotsUsed = 0;
    }

    uint64_t offset = fIntrinsicConstantBufferSlotsUsed * kIntrinsicConstantAlignedSize;
    fSharedContext->queue().WriteBuffer(
            fIntrinsicConstantBuffer->dawnBuffer(), offset, &rtAdjust, sizeof(rtAdjust));
    fIntrinsicConstantBufferSlotsUsed++;
#endif
}
void DawnCommandBuffer::setViewport(const SkRect& viewport) {
    fActiveRenderPassEncoder.SetViewport(
            viewport.x(), viewport.y(), viewport.width(), viewport.height(), 0, 1);
}
void DawnCommandBuffer::setBlendConstants(float* blendConstants) {
    wgpu::Color blendConst = {
            blendConstants[0], blendConstants[1], blendConstants[2], blendConstants[3]};
    fActiveRenderPassEncoder.SetBlendConstant(&blendConst);
}
void DawnCommandBuffer::draw(PrimitiveType type,
                             unsigned int baseVertex,
                             unsigned int vertexCount) {
    this->syncUniformBuffers();

    fActiveRenderPassEncoder.Draw(vertexCount, 1, baseVertex);
}
void DawnCommandBuffer::drawIndexed(PrimitiveType type,
                                    unsigned int baseIndex,
                                    unsigned int indexCount,
                                    unsigned int baseVertex) {
    this->syncUniformBuffers();

    fActiveRenderPassEncoder.DrawIndexed(indexCount, 1, baseIndex, baseVertex);
}
void DawnCommandBuffer::drawInstanced(PrimitiveType type,
                                      unsigned int baseVertex,
                                      unsigned int vertexCount,
                                      unsigned int baseInstance,
                                      unsigned int instanceCount) {
    this->syncUniformBuffers();

    fActiveRenderPassEncoder.Draw(vertexCount, instanceCount, baseVertex, baseInstance);
}
void DawnCommandBuffer::drawIndexedInstanced(PrimitiveType type,
                                             unsigned int baseIndex,
                                             unsigned int indexCount,
                                             unsigned int baseVertex,
                                             unsigned int baseInstance,
                                             unsigned int instanceCount) {
    this->syncUniformBuffers();

    fActiveRenderPassEncoder.DrawIndexed(
            indexCount, instanceCount, baseIndex, baseVertex, baseInstance);
}
void DawnCommandBuffer::drawIndirect(PrimitiveType type) {
    this->syncUniformBuffers();

    fActiveRenderPassEncoder.DrawIndirect(fCurrentIndirectBuffer, fCurrentIndirectBufferOffset);
}
void DawnCommandBuffer::drawIndexedIndirect(PrimitiveType type) {
    this->syncUniformBuffers();

    fActiveRenderPassEncoder.DrawIndexedIndirect(fCurrentIndirectBuffer,
                                                 fCurrentIndirectBufferOffset);
}
void DawnCommandBuffer::beginComputePass() {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);
    fActiveComputePassEncoder = fCommandEncoder.BeginComputePass();
}
void DawnCommandBuffer::bindComputePipeline(const ComputePipeline* computePipeline) {
    SkASSERT(fActiveComputePassEncoder);

    fActiveComputePipeline = static_cast<const DawnComputePipeline*>(computePipeline);
    fActiveComputePassEncoder.SetPipeline(fActiveComputePipeline->dawnComputePipeline());
}
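// Gathers the dispatch's buffer, texture, and sampler bindings into a single bind group bound at
// index 0 of the active compute pipeline's layout.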
void DawnCommandBuffer::bindDispatchResources(const DispatchGroup& group,
                                              const DispatchGroup::Dispatch& dispatch) {
    SkASSERT(fActiveComputePassEncoder);

    skia_private::TArray<wgpu::BindGroupEntry> entries;
    entries.reserve(dispatch.fBindings.size());

    for (const ResourceBinding& binding : dispatch.fBindings) {
        wgpu::BindGroupEntry& entry = entries.push_back();
        entry.binding = binding.fIndex;
        if (const BufferView* buffer = std::get_if<BufferView>(&binding.fResource)) {
            entry.buffer = static_cast<const DawnBuffer*>(buffer->fInfo.fBuffer)->dawnBuffer();
            entry.offset = buffer->fInfo.fOffset;
            entry.size = buffer->fSize;
        } else if (const TextureIndex* texIdx = std::get_if<TextureIndex>(&binding.fResource)) {
            const DawnTexture* texture =
                    static_cast<const DawnTexture*>(group.getTexture(texIdx->fValue));
            entry.textureView = texture->sampleTextureView();
        } else if (const SamplerIndex* samplerIdx = std::get_if<SamplerIndex>(&binding.fResource)) {
            const DawnSampler* sampler =
                    static_cast<const DawnSampler*>(group.getSampler(samplerIdx->fValue));
            entry.sampler = sampler->dawnSampler();
        } else {
            SK_ABORT("unsupported dispatch resource type");
        }
    }

    wgpu::BindGroupDescriptor desc;
    desc.layout = fActiveComputePipeline->dawnGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    auto bindGroup = fSharedContext->device().CreateBindGroup(&desc);
    fActiveComputePassEncoder.SetBindGroup(0, bindGroup);
}
void DawnCommandBuffer::dispatchWorkgroups(const WorkgroupSize& globalSize) {
    SkASSERT(fActiveComputePassEncoder);

    fActiveComputePassEncoder.DispatchWorkgroups(
            globalSize.fWidth, globalSize.fHeight, globalSize.fDepth);
}
void DawnCommandBuffer::dispatchWorkgroupsIndirect(const Buffer* indirectBuffer,
                                                   size_t indirectBufferOffset) {
    SkASSERT(fActiveComputePassEncoder);

    auto& wgpuIndirectBuffer = static_cast<const DawnBuffer*>(indirectBuffer)->dawnBuffer();
    fActiveComputePassEncoder.DispatchWorkgroupsIndirect(wgpuIndirectBuffer, indirectBufferOffset);
}
void DawnCommandBuffer::endComputePass() {
    SkASSERT(fActiveComputePassEncoder);
    fActiveComputePassEncoder.End();
    fActiveComputePassEncoder = nullptr;
}
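// The onCopy* methods below translate graphite copy commands into the corresponding
// wgpu::CommandEncoder copy calls; none of them may run while a render or compute pass is open.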
bool DawnCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer,
                                             size_t srcOffset,
                                             const Buffer* dstBuffer,
                                             size_t dstOffset,
                                             size_t size) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    auto& wgpuBufferSrc = static_cast<const DawnBuffer*>(srcBuffer)->dawnBuffer();
    auto& wgpuBufferDst = static_cast<const DawnBuffer*>(dstBuffer)->dawnBuffer();

    fCommandEncoder.CopyBufferToBuffer(wgpuBufferSrc, srcOffset, wgpuBufferDst, dstOffset, size);
    return true;
}
bool DawnCommandBuffer::onCopyTextureToBuffer(const Texture* texture,
                                              SkIRect srcRect,
                                              const Buffer* buffer,
                                              size_t bufferOffset,
                                              size_t bufferRowBytes) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    const auto* wgpuTexture = static_cast<const DawnTexture*>(texture);
    auto& wgpuBuffer = static_cast<const DawnBuffer*>(buffer)->dawnBuffer();

    wgpu::ImageCopyTexture src;
    src.texture = wgpuTexture->dawnTexture();
    src.origin.x = srcRect.x();
    src.origin.y = srcRect.y();
    src.aspect = wgpuTexture->textureInfo().dawnTextureSpec().fAspect;

    wgpu::ImageCopyBuffer dst;
    dst.buffer = wgpuBuffer;
    dst.layout.offset = bufferOffset;
    // Dawn requires buffer rows to be aligned to 256 bytes.
    SkASSERT((bufferRowBytes & 0xFF) == 0);
    dst.layout.bytesPerRow = bufferRowBytes;

    wgpu::Extent3D copySize = {
            static_cast<uint32_t>(srcRect.width()), static_cast<uint32_t>(srcRect.height()), 1};
    fCommandEncoder.CopyTextureToBuffer(&src, &dst, &copySize);

    return true;
}
bool DawnCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
                                              const Texture* texture,
                                              const BufferTextureCopyData* copyData,
                                              int count) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    auto& wgpuTexture = static_cast<const DawnTexture*>(texture)->dawnTexture();
    auto& wgpuBuffer = static_cast<const DawnBuffer*>(buffer)->dawnBuffer();

    wgpu::ImageCopyBuffer src;
    src.buffer = wgpuBuffer;

    wgpu::ImageCopyTexture dst;
    dst.texture = wgpuTexture;

    for (int i = 0; i < count; ++i) {
        src.layout.offset = copyData[i].fBufferOffset;
        // Dawn requires buffer rows to be aligned to 256 bytes.
        SkASSERT((copyData[i].fBufferRowBytes & 0xFF) == 0);
        src.layout.bytesPerRow = copyData[i].fBufferRowBytes;

        dst.origin.x = copyData[i].fRect.x();
        dst.origin.y = copyData[i].fRect.y();
        dst.mipLevel = copyData[i].fMipLevel;

        wgpu::Extent3D copySize = {static_cast<uint32_t>(copyData[i].fRect.width()),
                                   static_cast<uint32_t>(copyData[i].fRect.height()),
                                   1};
        fCommandEncoder.CopyBufferToTexture(&src, &dst, &copySize);
    }

    return true;
}
bool DawnCommandBuffer::onCopyTextureToTexture(const Texture* src,
                                               SkIRect srcRect,
                                               const Texture* dst,
                                               SkIPoint dstPoint,
                                               int mipLevel) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    auto& wgpuTextureSrc = static_cast<const DawnTexture*>(src)->dawnTexture();
    auto& wgpuTextureDst = static_cast<const DawnTexture*>(dst)->dawnTexture();

    wgpu::ImageCopyTexture srcArgs;
    srcArgs.texture = wgpuTextureSrc;
    srcArgs.origin.x = srcRect.fLeft;
    srcArgs.origin.y = srcRect.fTop;

    wgpu::ImageCopyTexture dstArgs;
    dstArgs.texture = wgpuTextureDst;
    dstArgs.origin.x = dstPoint.fX;
    dstArgs.origin.y = dstPoint.fY;
    dstArgs.mipLevel = mipLevel;

    wgpu::Extent3D copySize = {
            static_cast<uint32_t>(srcRect.width()), static_cast<uint32_t>(srcRect.height()), 1};

    fCommandEncoder.CopyTextureToTexture(&srcArgs, &dstArgs, &copySize);
    return true;
}
bool DawnCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) {
bool DawnCommandBuffer::onClearBuffer(const Buffer* buffer, size_t offset, size_t size) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    auto& wgpuBuffer = static_cast<const DawnBuffer*>(buffer)->dawnBuffer();
    fCommandEncoder.ClearBuffer(wgpuBuffer, offset, size);

    return true;
}