DawnCommandBuffer.cpp

/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/dawn/DawnCommandBuffer.h"

#include "include/private/base/SkTArray.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/dawn/DawnBuffer.h"
#include "src/gpu/graphite/dawn/DawnComputePipeline.h"
#include "src/gpu/graphite/dawn/DawnGraphicsPipeline.h"
#include "src/gpu/graphite/dawn/DawnResourceProvider.h"
#include "src/gpu/graphite/dawn/DawnSampler.h"
#include "src/gpu/graphite/dawn/DawnSharedContext.h"
#include "src/gpu/graphite/dawn/DawnTexture.h"

namespace skgpu::graphite {

namespace {

using IntrinsicConstant = float[4];

constexpr int kBufferBindingOffsetAlignment = 256;

constexpr int kIntrinsicConstantAlignedSize =
        SkAlignTo(sizeof(IntrinsicConstant), kBufferBindingOffsetAlignment);

#if defined(__EMSCRIPTEN__)
// When running against WebGPU in WASM we don't have the wgpu::CommandEncoder::WriteBuffer method.
// Instead we allocate a fixed number of slots in one buffer to hold the intrinsic constants; if
// all slots are used we allocate another buffer.
constexpr int kNumSlotsForIntrinsicConstantBuffer = 8;
#else
// Dawn has an in-band WriteBuffer command, so we can keep overwriting the same slot between
// render passes. Zero indicates this behavior.
constexpr int kNumSlotsForIntrinsicConstantBuffer = 0;
#endif

}  // namespace

std::unique_ptr<DawnCommandBuffer> DawnCommandBuffer::Make(const DawnSharedContext* sharedContext,
                                                           DawnResourceProvider* resourceProvider) {
    std::unique_ptr<DawnCommandBuffer> cmdBuffer(
            new DawnCommandBuffer(sharedContext, resourceProvider));
    if (!cmdBuffer->setNewCommandBufferResources()) {
        return {};
    }
    return cmdBuffer;
}

DawnCommandBuffer::DawnCommandBuffer(const DawnSharedContext* sharedContext,
                                     DawnResourceProvider* resourceProvider)
        : fSharedContext(sharedContext)
        , fResourceProvider(resourceProvider) {}

DawnCommandBuffer::~DawnCommandBuffer() {}

wgpu::CommandBuffer DawnCommandBuffer::finishEncoding() {
    SkASSERT(fCommandEncoder);
    wgpu::CommandBuffer cmdBuffer = fCommandEncoder.Finish();

    fCommandEncoder = nullptr;

    return cmdBuffer;
}

void DawnCommandBuffer::onResetCommandBuffer() {
    fIntrinsicConstantBuffer = nullptr;

    fActiveGraphicsPipeline = nullptr;
    fActiveRenderPassEncoder = nullptr;
    fActiveComputePassEncoder = nullptr;
    fCommandEncoder = nullptr;

    for (auto& bufferSlot : fBoundUniformBuffers) {
        bufferSlot = nullptr;
    }
    fBoundUniformBuffersDirty = true;
}

bool DawnCommandBuffer::setNewCommandBufferResources() {
    SkASSERT(!fCommandEncoder);
    fCommandEncoder = fSharedContext->device().CreateCommandEncoder();
    SkASSERT(fCommandEncoder);
    return true;
}

bool DawnCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc,
                                        const Texture* colorTexture,
                                        const Texture* resolveTexture,
                                        const Texture* depthStencilTexture,
                                        SkRect viewport,
                                        const DrawPassList& drawPasses) {
    // Update the intrinsic constant buffer with the viewport before starting a render pass.
    this->preprocessViewport(viewport);

    if (!this->beginRenderPass(renderPassDesc, colorTexture, resolveTexture, depthStencilTexture)) {
        return false;
    }

    this->setViewport(viewport);

    for (const auto& drawPass : drawPasses) {
        this->addDrawPass(drawPass.get());
    }

    this->endRenderPass();
    return true;
}

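// Replays a span of dispatch groups as a single compute pass: each dispatch binds its pipeline
// and resources, then issues either a direct dispatch (a WorkgroupSize) or an indirect dispatch
// (a BufferView holding the workgroup counts).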
bool DawnCommandBuffer::onAddComputePass(DispatchGroupSpan groups) {
    this->beginComputePass();
    for (const auto& group : groups) {
        group->addResourceRefs(this);
        for (const auto& dispatch : group->dispatches()) {
            this->bindComputePipeline(group->getPipeline(dispatch.fPipelineIndex));
            this->bindDispatchResources(*group, dispatch);
            if (const WorkgroupSize* globalSize =
                        std::get_if<WorkgroupSize>(&dispatch.fGlobalSizeOrIndirect)) {
                this->dispatchWorkgroups(*globalSize);
            } else {
                SkASSERT(std::holds_alternative<BufferView>(dispatch.fGlobalSizeOrIndirect));
                const BufferView& indirect =
                        *std::get_if<BufferView>(&dispatch.fGlobalSizeOrIndirect);
                this->dispatchWorkgroupsIndirect(indirect.fInfo.fBuffer, indirect.fInfo.fOffset);
            }
        }
    }
    this->endComputePass();
    return true;
}

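// Translates a RenderPassDesc into a wgpu::RenderPassDescriptor: maps Skia's LoadOp/StoreOp
// enums onto their wgpu counterparts, wires up the color, resolve, and depth/stencil
// attachments, and, when the resolve attachment must be loaded (LoadOp::kLoad), routes through
// loadMSAAFromResolveAndBeginRenderPassEncoder() to seed the MSAA attachment with a draw.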
bool DawnCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc,
                                        const Texture* colorTexture,
                                        const Texture* resolveTexture,
                                        const Texture* depthStencilTexture) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    constexpr static wgpu::LoadOp wgpuLoadActionMap[]{
            wgpu::LoadOp::Load,
            wgpu::LoadOp::Clear,
            wgpu::LoadOp::Clear  // Don't care
    };
    static_assert((int)LoadOp::kLoad == 0);
    static_assert((int)LoadOp::kClear == 1);
    static_assert((int)LoadOp::kDiscard == 2);
    static_assert(std::size(wgpuLoadActionMap) == kLoadOpCount);

    constexpr static wgpu::StoreOp wgpuStoreActionMap[]{wgpu::StoreOp::Store,
                                                        wgpu::StoreOp::Discard};
    static_assert((int)StoreOp::kStore == 0);
    static_assert((int)StoreOp::kDiscard == 1);
    static_assert(std::size(wgpuStoreActionMap) == kStoreOpCount);

    wgpu::RenderPassDescriptor wgpuRenderPass = {};
    wgpu::RenderPassColorAttachment wgpuColorAttachment;
    wgpu::RenderPassDepthStencilAttachment wgpuDepthStencilAttachment;

    // Set up the color attachment.
#ifndef __EMSCRIPTEN__
    wgpu::DawnRenderPassColorAttachmentRenderToSingleSampled msaaRenderToSingleSampledDesc;
#endif

    auto& colorInfo = renderPassDesc.fColorAttachment;
    bool loadMSAAFromResolveExplicitly = false;
    if (colorTexture) {
        wgpuRenderPass.colorAttachments = &wgpuColorAttachment;
        wgpuRenderPass.colorAttachmentCount = 1;

        // TODO: check Texture matches RenderPassDesc
        const auto* dawnColorTexture = static_cast<const DawnTexture*>(colorTexture);
        SkASSERT(dawnColorTexture->renderTextureView());
        wgpuColorAttachment.view = dawnColorTexture->renderTextureView();

        const std::array<float, 4>& clearColor = renderPassDesc.fClearColor;
        wgpuColorAttachment.clearValue = {
                clearColor[0], clearColor[1], clearColor[2], clearColor[3]};
        wgpuColorAttachment.loadOp = wgpuLoadActionMap[static_cast<int>(colorInfo.fLoadOp)];
        wgpuColorAttachment.storeOp = wgpuStoreActionMap[static_cast<int>(colorInfo.fStoreOp)];

        // Set up the resolve attachment.
        if (resolveTexture) {
            SkASSERT(renderPassDesc.fColorResolveAttachment.fStoreOp == StoreOp::kStore);
            // TODO: check Texture matches RenderPassDesc
            const auto* dawnResolveTexture = static_cast<const DawnTexture*>(resolveTexture);
            SkASSERT(dawnResolveTexture->renderTextureView());
            wgpuColorAttachment.resolveTarget = dawnResolveTexture->renderTextureView();

            // Inclusion of a resolve texture implies the client wants to finish the
            // render pass with a resolve.
            SkASSERT(wgpuColorAttachment.storeOp == wgpu::StoreOp::Discard);

            // But it also means we have to load the resolve texture into the MSAA color attachment.
            loadMSAAFromResolveExplicitly =
                    renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;
            // TODO: If the color resolve texture is read-only we can use a private (vs. memoryless)
            // msaa attachment that's coupled to the framebuffer and the StoreAndMultisampleResolve
            // action instead of loading as a draw.
        } else {
            [[maybe_unused]] bool isMSAAToSingleSampled = renderPassDesc.fSampleCount > 1 &&
                                                          colorTexture->numSamples() == 1;
#if defined(__EMSCRIPTEN__)
            SkASSERT(!isMSAAToSingleSampled);
#else
            if (isMSAAToSingleSampled) {
                // If the render pass is multisampled but the color attachment is single sampled,
                // we need to activate the multisampled-render-to-single-sampled feature for this
                // render pass.
                SkASSERT(fSharedContext->device().HasFeature(
                        wgpu::FeatureName::MSAARenderToSingleSampled));

                wgpuColorAttachment.nextInChain = &msaaRenderToSingleSampledDesc;
                msaaRenderToSingleSampledDesc.implicitSampleCount = renderPassDesc.fSampleCount;
            }
#endif
        }
    }

    // Set up the depth/stencil attachment.
    auto& depthStencilInfo = renderPassDesc.fDepthStencilAttachment;
    if (depthStencilTexture) {
        const auto* dawnDepthStencilTexture = static_cast<const DawnTexture*>(depthStencilTexture);
        auto format = dawnDepthStencilTexture->textureInfo().dawnTextureSpec().getViewFormat();
        SkASSERT(DawnFormatIsDepthOrStencil(format));

        // TODO: check Texture matches RenderPassDesc
        SkASSERT(dawnDepthStencilTexture->renderTextureView());
        wgpuDepthStencilAttachment.view = dawnDepthStencilTexture->renderTextureView();

        if (DawnFormatIsDepth(format)) {
            wgpuDepthStencilAttachment.depthClearValue = renderPassDesc.fClearDepth;
            wgpuDepthStencilAttachment.depthLoadOp =
                    wgpuLoadActionMap[static_cast<int>(depthStencilInfo.fLoadOp)];
            wgpuDepthStencilAttachment.depthStoreOp =
                    wgpuStoreActionMap[static_cast<int>(depthStencilInfo.fStoreOp)];
        }

        if (DawnFormatIsStencil(format)) {
            wgpuDepthStencilAttachment.stencilClearValue = renderPassDesc.fClearStencil;
            wgpuDepthStencilAttachment.stencilLoadOp =
                    wgpuLoadActionMap[static_cast<int>(depthStencilInfo.fLoadOp)];
            wgpuDepthStencilAttachment.stencilStoreOp =
                    wgpuStoreActionMap[static_cast<int>(depthStencilInfo.fStoreOp)];
        }

        wgpuRenderPass.depthStencilAttachment = &wgpuDepthStencilAttachment;
    } else {
        SkASSERT(!depthStencilInfo.fTextureInfo.isValid());
    }

    if (loadMSAAFromResolveExplicitly) {
        // Manually load the contents of the resolve texture into the MSAA attachment as a draw,
        // so the actual load op for the MSAA attachment had better have been discard.
        if (!this->loadMSAAFromResolveAndBeginRenderPassEncoder(
                    renderPassDesc,
                    wgpuRenderPass,
                    static_cast<const DawnTexture*>(colorTexture))) {
            return false;
        }
    } else {
        fActiveRenderPassEncoder = fCommandEncoder.BeginRenderPass(&wgpuRenderPass);
    }

    return true;
}

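// Seeds an MSAA color attachment from its resolve texture. Because the resolve target may only
// be available as a wgpu::TextureView (e.g. a swapchain image), this runs two render passes:
// first a blit-with-draw from the resolve view into an intermediate MSAA-load texture, then the
// caller's render pass, which begins by blitting that texture into the MSAA attachment.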
bool DawnCommandBuffer::loadMSAAFromResolveAndBeginRenderPassEncoder(
        const RenderPassDesc& frontendRenderPassDesc,
        const wgpu::RenderPassDescriptor& wgpuRenderPassDesc,
        const DawnTexture* msaaTexture) {
    SkASSERT(!fActiveRenderPassEncoder);

    // Copy from the resolve texture to an intermediate texture. We use a blit-with-draw pipeline
    // because the resolve texture might be created from a swapchain and only its texture view may
    // be available, so onCopyTextureToTexture(), which operates on wgpu::Texture rather than
    // wgpu::TextureView, cannot be used in that case.
    auto msaaLoadTexture = fResourceProvider->findOrCreateDiscardableMSAALoadTexture(
            msaaTexture->dimensions(), msaaTexture->textureInfo());
    if (!msaaLoadTexture) {
        SKGPU_LOG_E("DawnCommandBuffer::loadMSAAFromResolveAndBeginRenderPassEncoder: "
                    "Can't create MSAA Load Texture.");
        return false;
    }

    this->trackCommandBufferResource(msaaLoadTexture);

    // Create an intermediate render pass (copy from resolve texture -> MSAA load texture).
    RenderPassDesc intermediateRenderPassDesc = {};
    intermediateRenderPassDesc.fColorAttachment.fLoadOp = LoadOp::kDiscard;
    intermediateRenderPassDesc.fColorAttachment.fStoreOp = StoreOp::kStore;
    intermediateRenderPassDesc.fColorAttachment.fTextureInfo =
            frontendRenderPassDesc.fColorResolveAttachment.fTextureInfo;

    wgpu::RenderPassColorAttachment wgpuIntermediateColorAttachment;
    // Dawn doesn't support an actual DontCare load op, so use LoadOp::Clear.
    wgpuIntermediateColorAttachment.loadOp = wgpu::LoadOp::Clear;
    wgpuIntermediateColorAttachment.clearValue = {1, 1, 1, 1};
    wgpuIntermediateColorAttachment.storeOp = wgpu::StoreOp::Store;
    wgpuIntermediateColorAttachment.view = msaaLoadTexture->renderTextureView();

    wgpu::RenderPassDescriptor wgpuIntermediateRenderPassDesc;
    wgpuIntermediateRenderPassDesc.colorAttachmentCount = 1;
    wgpuIntermediateRenderPassDesc.colorAttachments = &wgpuIntermediateColorAttachment;

    auto renderPassEncoder = fCommandEncoder.BeginRenderPass(&wgpuIntermediateRenderPassDesc);

    bool blitSucceeded = this->doBlitWithDraw(
            renderPassEncoder,
            intermediateRenderPassDesc,
            /*sourceTextureView=*/wgpuRenderPassDesc.colorAttachments[0].resolveTarget,
            msaaTexture->dimensions().width(),
            msaaTexture->dimensions().height());

    renderPassEncoder.End();

    if (!blitSucceeded) {
        return false;
    }

    // Start the actual render pass (blit from MSAA load texture -> MSAA texture).
    renderPassEncoder = fCommandEncoder.BeginRenderPass(&wgpuRenderPassDesc);

    if (!this->doBlitWithDraw(renderPassEncoder,
                              frontendRenderPassDesc,
                              /*sourceTextureView=*/msaaLoadTexture->renderTextureView(),
                              msaaTexture->dimensions().width(),
                              msaaTexture->dimensions().height())) {
        renderPassEncoder.End();
        return false;
    }

    fActiveRenderPassEncoder = renderPassEncoder;

    return true;
}

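// Blits sourceTextureView over the current render target by drawing a fullscreen triangle with a
// cached blit pipeline. The pipeline takes no uniforms and no vertex/instance attributes, and it
// binds a single texture that does not require a sampler.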
bool DawnCommandBuffer::doBlitWithDraw(const wgpu::RenderPassEncoder& renderEncoder,
                                       const RenderPassDesc& frontendRenderPassDesc,
                                       const wgpu::TextureView& sourceTextureView,
                                       int width,
                                       int height) {
    auto loadPipeline = fResourceProvider->findOrCreateBlitWithDrawPipeline(frontendRenderPassDesc);
    if (!loadPipeline) {
        SKGPU_LOG_E("Unable to create pipeline to blit with draw");
        return false;
    }

    SkASSERT(renderEncoder);

    renderEncoder.SetPipeline(loadPipeline);

    // The load MSAA pipeline takes no uniforms and no vertex/instance attributes, and it uses
    // only one texture that does not require a sampler.

    // TODO: b/260368758
    // Cache the single texture's bind group creation.
    wgpu::BindGroupEntry entry;
    entry.binding = 0;
    entry.textureView = sourceTextureView;

    wgpu::BindGroupDescriptor desc;
    desc.layout = loadPipeline.GetBindGroupLayout(0);
    desc.entryCount = 1;
    desc.entries = &entry;

    auto bindGroup = fSharedContext->device().CreateBindGroup(&desc);

    renderEncoder.SetBindGroup(0, bindGroup);

    renderEncoder.SetScissorRect(0, 0, width, height);
    renderEncoder.SetViewport(0, 0, width, height, 0, 1);

    // Fullscreen triangle
    renderEncoder.Draw(3);

    return true;
}

void DawnCommandBuffer::endRenderPass() {
    SkASSERT(fActiveRenderPassEncoder);
    fActiveRenderPassEncoder.End();
    fActiveRenderPassEncoder = nullptr;
}

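// Replays the commands recorded in a DrawPass. Each command type maps onto one of the bind/set/
// draw methods below; resources referenced by the pass are pinned for the lifetime of this
// command buffer via addResourceRefs().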
void DawnCommandBuffer::addDrawPass(const DrawPass* drawPass) {
    drawPass->addResourceRefs(this);
    for (auto [type, cmdPtr] : drawPass->commands()) {
        switch (type) {
            case DrawPassCommands::Type::kBindGraphicsPipeline: {
                auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr);
                this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex));
                break;
            }
            case DrawPassCommands::Type::kSetBlendConstants: {
                auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr);
                this->setBlendConstants(sbc->fBlendConstants);
                break;
            }
            case DrawPassCommands::Type::kBindUniformBuffer: {
                auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr);
                this->bindUniformBuffer(bub->fInfo, bub->fSlot);
                break;
            }
            case DrawPassCommands::Type::kBindDrawBuffers: {
                auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr);
                this->bindDrawBuffers(
                        bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect);
                break;
            }
            case DrawPassCommands::Type::kBindTexturesAndSamplers: {
                auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr);
                this->bindTextureAndSamplers(*drawPass, *bts);
                break;
            }
            case DrawPassCommands::Type::kSetScissor: {
                auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr);
                const SkIRect& rect = ss->fScissor;
                this->setScissor(rect.fLeft, rect.fTop, rect.width(), rect.height());
                break;
            }
            case DrawPassCommands::Type::kDraw: {
                auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr);
                this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexed: {
                auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr);
                this->drawIndexed(
                        draw->fType, draw->fBaseIndex, draw->fIndexCount, draw->fBaseVertex);
                break;
            }
            case DrawPassCommands::Type::kDrawInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr);
                this->drawInstanced(draw->fType,
                                    draw->fBaseVertex,
                                    draw->fVertexCount,
                                    draw->fBaseInstance,
                                    draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr);
                this->drawIndexedInstanced(draw->fType,
                                           draw->fBaseIndex,
                                           draw->fIndexCount,
                                           draw->fBaseVertex,
                                           draw->fBaseInstance,
                                           draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr);
                this->drawIndirect(draw->fType);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr);
                this->drawIndexedIndirect(draw->fType);
                break;
            }
        }
    }
}

void DawnCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) {
    SkASSERT(fActiveRenderPassEncoder);

    fActiveGraphicsPipeline = static_cast<const DawnGraphicsPipeline*>(graphicsPipeline);
    fActiveRenderPassEncoder.SetPipeline(fActiveGraphicsPipeline->dawnRenderPipeline());
    fBoundUniformBuffersDirty = true;
}

void DawnCommandBuffer::bindUniformBuffer(const BindUniformBufferInfo& info, UniformSlot slot) {
    SkASSERT(fActiveRenderPassEncoder);

    auto dawnBuffer = static_cast<const DawnBuffer*>(info.fBuffer);

    unsigned int bufferIndex = 0;
    switch (slot) {
        case UniformSlot::kRenderStep:
            bufferIndex = DawnGraphicsPipeline::kRenderStepUniformBufferIndex;
            break;
        case UniformSlot::kPaint:
            bufferIndex = DawnGraphicsPipeline::kPaintUniformBufferIndex;
            break;
        default:
            SkASSERT(false);
    }

    fBoundUniformBuffers[bufferIndex] = dawnBuffer;
    fBoundUniformBufferOffsets[bufferIndex] = static_cast<uint32_t>(info.fOffset);
    fBoundUniformBufferSizes[bufferIndex] = info.fBindingSize;

    fBoundUniformBuffersDirty = true;
}

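// Binds the vertex, instance, index, and indirect buffers for subsequent draws. Vertex and
// instance data use the fixed slots DawnGraphicsPipeline::kVertexBufferIndex and
// kInstanceBufferIndex; indices are always 16-bit; the indirect buffer is only remembered here
// and is consumed later by drawIndirect()/drawIndexedIndirect().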
void DawnCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices,
                                        const BindBufferInfo& instances,
                                        const BindBufferInfo& indices,
                                        const BindBufferInfo& indirect) {
    SkASSERT(fActiveRenderPassEncoder);

    if (vertices.fBuffer) {
        auto dawnBuffer = static_cast<const DawnBuffer*>(vertices.fBuffer)->dawnBuffer();
        fActiveRenderPassEncoder.SetVertexBuffer(
                DawnGraphicsPipeline::kVertexBufferIndex, dawnBuffer, vertices.fOffset);
    }
    if (instances.fBuffer) {
        auto dawnBuffer = static_cast<const DawnBuffer*>(instances.fBuffer)->dawnBuffer();
        fActiveRenderPassEncoder.SetVertexBuffer(
                DawnGraphicsPipeline::kInstanceBufferIndex, dawnBuffer, instances.fOffset);
    }
    if (indices.fBuffer) {
        auto dawnBuffer = static_cast<const DawnBuffer*>(indices.fBuffer)->dawnBuffer();
        fActiveRenderPassEncoder.SetIndexBuffer(
                dawnBuffer, wgpu::IndexFormat::Uint16, indices.fOffset);
    }
    if (indirect.fBuffer) {
        fCurrentIndirectBuffer = static_cast<const DawnBuffer*>(indirect.fBuffer)->dawnBuffer();
        fCurrentIndirectBufferOffset = indirect.fOffset;
    } else {
        fCurrentIndirectBuffer = nullptr;
        fCurrentIndirectBufferOffset = 0;
    }
}

void DawnCommandBuffer::bindTextureAndSamplers(
        const DrawPass& drawPass, const DrawPassCommands::BindTexturesAndSamplers& command) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline);

    wgpu::BindGroup bindGroup;
    if (command.fNumTexSamplers == 1) {
        // Optimize for the common single-texture case.
        SkASSERT(fActiveGraphicsPipeline->numTexturesAndSamplers() == 2);

        const auto* texture =
                static_cast<const DawnTexture*>(drawPass.getTexture(command.fTextureIndices[0]));
        const auto* sampler =
                static_cast<const DawnSampler*>(drawPass.getSampler(command.fSamplerIndices[0]));

        bindGroup = fResourceProvider->findOrCreateSingleTextureSamplerBindGroup(sampler, texture);
    } else {
        std::vector<wgpu::BindGroupEntry> entries(2 * command.fNumTexSamplers);

        for (int i = 0; i < command.fNumTexSamplers; ++i) {
            const auto* texture = static_cast<const DawnTexture*>(
                    drawPass.getTexture(command.fTextureIndices[i]));
            const auto* sampler = static_cast<const DawnSampler*>(
                    drawPass.getSampler(command.fSamplerIndices[i]));
            auto& wgpuTextureView = texture->sampleTextureView();
            auto& wgpuSampler = sampler->dawnSampler();

            // Assuming the shader generator assigns binding slots to a sampler then its texture,
            // then the next sampler and texture, and so on, we use 2 * i as the base binding
            // index of sampler i and its texture.
            // TODO: https://b.corp.google.com/issues/259457090:
            // Find a better configurable way of assigning samplers' and textures' bindings.
            entries[2 * i].binding = 2 * i;
            entries[2 * i].sampler = wgpuSampler;

            entries[2 * i + 1].binding = 2 * i + 1;
            entries[2 * i + 1].textureView = wgpuTextureView;
        }

        wgpu::BindGroupDescriptor desc;
        const auto& groupLayouts = fActiveGraphicsPipeline->dawnGroupLayouts();
        desc.layout = groupLayouts[DawnGraphicsPipeline::kTextureBindGroupIndex];
        desc.entryCount = entries.size();
        desc.entries = entries.data();

        bindGroup = fSharedContext->device().CreateBindGroup(&desc);
    }

    fActiveRenderPassEncoder.SetBindGroup(DawnGraphicsPipeline::kTextureBindGroupIndex, bindGroup);
}

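// Lazily (re)binds the uniform bind group before each draw. The group has three slots with
// dynamic offsets: [0] the intrinsic constants (rtAdjust), [1] the active RenderStep's uniforms,
// and [2] the paint uniforms; a null buffer marks a slot the current pipeline doesn't use.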
void DawnCommandBuffer::syncUniformBuffers() {
    if (fBoundUniformBuffersDirty) {
        fBoundUniformBuffersDirty = false;

        std::array<uint32_t, 3> dynamicOffsets;
        std::array<std::pair<const DawnBuffer*, uint32_t>, 3> boundBuffersAndSizes;
        boundBuffersAndSizes[0].first = fIntrinsicConstantBuffer.get();
        boundBuffersAndSizes[0].second = sizeof(IntrinsicConstant);

        int activeIntrinsicBufferSlot = fIntrinsicConstantBufferSlotsUsed - 1;
        dynamicOffsets[0] = activeIntrinsicBufferSlot * kIntrinsicConstantAlignedSize;

        if (fActiveGraphicsPipeline->hasStepUniforms() &&
            fBoundUniformBuffers[DawnGraphicsPipeline::kRenderStepUniformBufferIndex]) {
            boundBuffersAndSizes[1].first =
                    fBoundUniformBuffers[DawnGraphicsPipeline::kRenderStepUniformBufferIndex];
            boundBuffersAndSizes[1].second =
                    fBoundUniformBufferSizes[DawnGraphicsPipeline::kRenderStepUniformBufferIndex];
            dynamicOffsets[1] =
                    fBoundUniformBufferOffsets[DawnGraphicsPipeline::kRenderStepUniformBufferIndex];
        } else {
            // Unused buffer entry
            boundBuffersAndSizes[1].first = nullptr;
            dynamicOffsets[1] = 0;
        }

        if (fActiveGraphicsPipeline->hasPaintUniforms() &&
            fBoundUniformBuffers[DawnGraphicsPipeline::kPaintUniformBufferIndex]) {
            boundBuffersAndSizes[2].first =
                    fBoundUniformBuffers[DawnGraphicsPipeline::kPaintUniformBufferIndex];
            boundBuffersAndSizes[2].second =
                    fBoundUniformBufferSizes[DawnGraphicsPipeline::kPaintUniformBufferIndex];
            dynamicOffsets[2] =
                    fBoundUniformBufferOffsets[DawnGraphicsPipeline::kPaintUniformBufferIndex];
        } else {
            // Unused buffer entry
            boundBuffersAndSizes[2].first = nullptr;
            dynamicOffsets[2] = 0;
        }

        auto bindGroup =
                fResourceProvider->findOrCreateUniformBuffersBindGroup(boundBuffersAndSizes);

        fActiveRenderPassEncoder.SetBindGroup(DawnGraphicsPipeline::kUniformBufferBindGroupIndex,
                                              bindGroup,
                                              dynamicOffsets.size(),
                                              dynamicOffsets.data());
    }
}

void DawnCommandBuffer::setScissor(unsigned int left,
                                   unsigned int top,
                                   unsigned int width,
                                   unsigned int height) {
    SkASSERT(fActiveRenderPassEncoder);
    SkIRect scissor = SkIRect::MakeXYWH(
            left + fReplayTranslation.x(), top + fReplayTranslation.y(), width, height);
    if (!scissor.intersect(SkIRect::MakeSize(fRenderPassSize))) {
        scissor.setEmpty();
    }
    fActiveRenderPassEncoder.SetScissorRect(
            scissor.x(), scissor.y(), scissor.width(), scissor.height());
}

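// Computes the rtAdjust intrinsic constants for a viewport at (x, y) with size (w, h):
// {2/w, -2/h, -1 - 2x/w, 1 + 2y/h}. A device-space position then maps to NDC as
// ndcX = devX * rtAdjust[0] + rtAdjust[2] and ndcY = devY * rtAdjust[1] + rtAdjust[3], which
// sends [x, x+w] to [-1, 1] and [y, y+h] to [1, -1], flipping y for WebGPU's NDC convention.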
void DawnCommandBuffer::preprocessViewport(const SkRect& viewport) {
    // Dawn's framebuffer space has (0, 0) at the top left, which agrees with Skia's device coords.
    // However, in NDC (-1, -1) is the bottom left. So we flip the origin here (assuming all
    // surfaces we have are TopLeft origin).
    const float x = viewport.x() - fReplayTranslation.x();
    const float y = viewport.y() - fReplayTranslation.y();
    const float invTwoW = 2.f / viewport.width();
    const float invTwoH = 2.f / viewport.height();
    const IntrinsicConstant rtAdjust = {invTwoW, -invTwoH, -1.f - x * invTwoW, 1.f + y * invTwoH};

    bool needNewBuffer = !fIntrinsicConstantBuffer;
    if (!needNewBuffer && kNumSlotsForIntrinsicConstantBuffer > 0) {
        needNewBuffer = (fIntrinsicConstantBufferSlotsUsed == kNumSlotsForIntrinsicConstantBuffer);
    }

    if (needNewBuffer) {
        size_t bufferSize;

        if constexpr (kNumSlotsForIntrinsicConstantBuffer > 1) {
            // With multiple slots in the one constant buffer, each slot must be bindable, so each
            // slot's offset must be aligned.
            bufferSize = kIntrinsicConstantAlignedSize * kNumSlotsForIntrinsicConstantBuffer;
        } else {
            // In the single-slot case, the slot offset is always 0.
            bufferSize = sizeof(IntrinsicConstant);
        }

        fIntrinsicConstantBuffer =
                fResourceProvider->findOrCreateDawnBuffer(bufferSize,
                                                          BufferType::kUniform,
                                                          AccessPattern::kGpuOnly,
                                                          "IntrinsicConstantBuffer");

        fIntrinsicConstantBufferSlotsUsed = 0;
        SkASSERT(fIntrinsicConstantBuffer);

        this->trackResource(fIntrinsicConstantBuffer);
    }

    // TODO: https://b.corp.google.com/issues/259267703
    // Make updating the intrinsic constants faster. Metal has a setVertexBytes method for quickly
    // sending intrinsic constants to the vertex shader without any buffer, but Dawn doesn't have
    // a similar capability. So we have to use WriteBuffer(), which is not allowed to be called
    // when there is an active render pass.
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    if constexpr (kNumSlotsForIntrinsicConstantBuffer > 0) {
        uint64_t offset = fIntrinsicConstantBufferSlotsUsed * kIntrinsicConstantAlignedSize;
        fSharedContext->queue().WriteBuffer(fIntrinsicConstantBuffer->dawnBuffer(),
                                            offset,
                                            &rtAdjust,
                                            sizeof(rtAdjust));
        fIntrinsicConstantBufferSlotsUsed++;
    } else {
#if !defined(__EMSCRIPTEN__)
        fCommandEncoder.WriteBuffer(fIntrinsicConstantBuffer->dawnBuffer(),
                                    0,
                                    reinterpret_cast<const uint8_t*>(rtAdjust),
                                    sizeof(rtAdjust));
#endif
        fIntrinsicConstantBufferSlotsUsed = 1;
    }
}

void DawnCommandBuffer::setViewport(const SkRect& viewport) {
    SkASSERT(fActiveRenderPassEncoder);
    fActiveRenderPassEncoder.SetViewport(
            viewport.x(), viewport.y(), viewport.width(), viewport.height(), 0, 1);
}

void DawnCommandBuffer::setBlendConstants(float* blendConstants) {
    SkASSERT(fActiveRenderPassEncoder);
    wgpu::Color blendConst = {
            blendConstants[0], blendConstants[1], blendConstants[2], blendConstants[3]};
    fActiveRenderPassEncoder.SetBlendConstant(&blendConst);
}

void DawnCommandBuffer::draw(PrimitiveType type,
                             unsigned int baseVertex,
                             unsigned int vertexCount) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.Draw(vertexCount, /*instanceCount=*/1, baseVertex);
}

void DawnCommandBuffer::drawIndexed(PrimitiveType type,
                                    unsigned int baseIndex,
                                    unsigned int indexCount,
                                    unsigned int baseVertex) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.DrawIndexed(indexCount, /*instanceCount=*/1, baseIndex, baseVertex);
}

void DawnCommandBuffer::drawInstanced(PrimitiveType type,
                                      unsigned int baseVertex,
                                      unsigned int vertexCount,
                                      unsigned int baseInstance,
                                      unsigned int instanceCount) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.Draw(vertexCount, instanceCount, baseVertex, baseInstance);
}

void DawnCommandBuffer::drawIndexedInstanced(PrimitiveType type,
                                             unsigned int baseIndex,
                                             unsigned int indexCount,
                                             unsigned int baseVertex,
                                             unsigned int baseInstance,
                                             unsigned int instanceCount) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.DrawIndexed(
            indexCount, instanceCount, baseIndex, baseVertex, baseInstance);
}

void DawnCommandBuffer::drawIndirect(PrimitiveType type) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);
    SkASSERT(fCurrentIndirectBuffer);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.DrawIndirect(fCurrentIndirectBuffer, fCurrentIndirectBufferOffset);
}

void DawnCommandBuffer::drawIndexedIndirect(PrimitiveType type) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);
    SkASSERT(fCurrentIndirectBuffer);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.DrawIndexedIndirect(fCurrentIndirectBuffer,
                                                 fCurrentIndirectBufferOffset);
}

void DawnCommandBuffer::beginComputePass() {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);
    fActiveComputePassEncoder = fCommandEncoder.BeginComputePass();
}

void DawnCommandBuffer::bindComputePipeline(const ComputePipeline* computePipeline) {
    SkASSERT(fActiveComputePassEncoder);

    fActiveComputePipeline = static_cast<const DawnComputePipeline*>(computePipeline);
    fActiveComputePassEncoder.SetPipeline(fActiveComputePipeline->dawnComputePipeline());
}

void DawnCommandBuffer::bindDispatchResources(const DispatchGroup& group,
                                              const DispatchGroup::Dispatch& dispatch) {
    SkASSERT(fActiveComputePassEncoder);
    SkASSERT(fActiveComputePipeline);

    // Bind all pipeline resources to a single new bind group at index 0.
    // NOTE: Caching the bind groups here might be beneficial based on the layout and the bound
    // resources (though it's questionable how often a bind group will end up getting reused since
    // the bound objects change often).
    skia_private::TArray<wgpu::BindGroupEntry> entries;
    entries.reserve(dispatch.fBindings.size());

    for (const ResourceBinding& binding : dispatch.fBindings) {
        wgpu::BindGroupEntry& entry = entries.push_back();
        entry.binding = binding.fIndex;
        if (const BufferView* buffer = std::get_if<BufferView>(&binding.fResource)) {
            entry.buffer = static_cast<const DawnBuffer*>(buffer->fInfo.fBuffer)->dawnBuffer();
            entry.offset = buffer->fInfo.fOffset;
            entry.size = buffer->fSize;
        } else if (const TextureIndex* texIdx = std::get_if<TextureIndex>(&binding.fResource)) {
            const DawnTexture* texture =
                    static_cast<const DawnTexture*>(group.getTexture(texIdx->fValue));
            SkASSERT(texture);
            entry.textureView = texture->sampleTextureView();
        } else if (const SamplerIndex* samplerIdx = std::get_if<SamplerIndex>(&binding.fResource)) {
            const DawnSampler* sampler =
                    static_cast<const DawnSampler*>(group.getSampler(samplerIdx->fValue));
            entry.sampler = sampler->dawnSampler();
        } else {
            SK_ABORT("unsupported dispatch resource type");
        }
    }

    wgpu::BindGroupDescriptor desc;
    desc.layout = fActiveComputePipeline->dawnGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    auto bindGroup = fSharedContext->device().CreateBindGroup(&desc);
    fActiveComputePassEncoder.SetBindGroup(0, bindGroup);
}

void DawnCommandBuffer::dispatchWorkgroups(const WorkgroupSize& globalSize) {
    SkASSERT(fActiveComputePassEncoder);
    SkASSERT(fActiveComputePipeline);

    fActiveComputePassEncoder.DispatchWorkgroups(
            globalSize.fWidth, globalSize.fHeight, globalSize.fDepth);
}

void DawnCommandBuffer::dispatchWorkgroupsIndirect(const Buffer* indirectBuffer,
                                                   size_t indirectBufferOffset) {
    SkASSERT(fActiveComputePassEncoder);
    SkASSERT(fActiveComputePipeline);

    auto& wgpuIndirectBuffer = static_cast<const DawnBuffer*>(indirectBuffer)->dawnBuffer();
    fActiveComputePassEncoder.DispatchWorkgroupsIndirect(wgpuIndirectBuffer, indirectBufferOffset);
}

void DawnCommandBuffer::endComputePass() {
    SkASSERT(fActiveComputePassEncoder);
    fActiveComputePassEncoder.End();
    fActiveComputePassEncoder = nullptr;
}

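// The onCopy* entry points below encode transfer commands directly on the command encoder, so
// they must be called outside of any render or compute pass. Note that WebGPU requires
// bytesPerRow for texture<->buffer copies to be a multiple of 256, which the SkASSERTs on the
// row-bytes values check.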
bool DawnCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer,
                                             size_t srcOffset,
                                             const Buffer* dstBuffer,
                                             size_t dstOffset,
                                             size_t size) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    auto& wgpuBufferSrc = static_cast<const DawnBuffer*>(srcBuffer)->dawnBuffer();
    auto& wgpuBufferDst = static_cast<const DawnBuffer*>(dstBuffer)->dawnBuffer();

    fCommandEncoder.CopyBufferToBuffer(wgpuBufferSrc, srcOffset, wgpuBufferDst, dstOffset, size);
    return true;
}

bool DawnCommandBuffer::onCopyTextureToBuffer(const Texture* texture,
                                              SkIRect srcRect,
                                              const Buffer* buffer,
                                              size_t bufferOffset,
                                              size_t bufferRowBytes) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    const auto* wgpuTexture = static_cast<const DawnTexture*>(texture);
    auto& wgpuBuffer = static_cast<const DawnBuffer*>(buffer)->dawnBuffer();

    wgpu::ImageCopyTexture src;
    src.texture = wgpuTexture->dawnTexture();
    src.origin.x = srcRect.x();
    src.origin.y = srcRect.y();
    src.aspect = wgpuTexture->textureInfo().dawnTextureSpec().fAspect;

    wgpu::ImageCopyBuffer dst;
    dst.buffer = wgpuBuffer;
    dst.layout.offset = bufferOffset;
    // Dawn requires the buffer's row stride to be a multiple of 256.
    // https://b.corp.google.com/issues/259264489
    SkASSERT((bufferRowBytes & 0xFF) == 0);
    dst.layout.bytesPerRow = bufferRowBytes;

    wgpu::Extent3D copySize = {
            static_cast<uint32_t>(srcRect.width()), static_cast<uint32_t>(srcRect.height()), 1};
    fCommandEncoder.CopyTextureToBuffer(&src, &dst, &copySize);

    return true;
}

bool DawnCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
                                              const Texture* texture,
                                              const BufferTextureCopyData* copyData,
                                              int count) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    auto& wgpuTexture = static_cast<const DawnTexture*>(texture)->dawnTexture();
    auto& wgpuBuffer = static_cast<const DawnBuffer*>(buffer)->dawnBuffer();

    wgpu::ImageCopyBuffer src;
    src.buffer = wgpuBuffer;

    wgpu::ImageCopyTexture dst;
    dst.texture = wgpuTexture;

    for (int i = 0; i < count; ++i) {
        src.layout.offset = copyData[i].fBufferOffset;
        // Dawn requires the buffer's row stride to be a multiple of 256.
        // https://b.corp.google.com/issues/259264489
        SkASSERT((copyData[i].fBufferRowBytes & 0xFF) == 0);
        src.layout.bytesPerRow = copyData[i].fBufferRowBytes;

        dst.origin.x = copyData[i].fRect.x();
        dst.origin.y = copyData[i].fRect.y();
        dst.mipLevel = copyData[i].fMipLevel;

        wgpu::Extent3D copySize = {static_cast<uint32_t>(copyData[i].fRect.width()),
                                   static_cast<uint32_t>(copyData[i].fRect.height()),
                                   1};
        fCommandEncoder.CopyBufferToTexture(&src, &dst, &copySize);
    }

    return true;
}

bool DawnCommandBuffer::onCopyTextureToTexture(const Texture* src,
                                               SkIRect srcRect,
                                               const Texture* dst,
                                               SkIPoint dstPoint,
                                               int mipLevel) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    auto& wgpuTextureSrc = static_cast<const DawnTexture*>(src)->dawnTexture();
    auto& wgpuTextureDst = static_cast<const DawnTexture*>(dst)->dawnTexture();

    wgpu::ImageCopyTexture srcArgs;
    srcArgs.texture = wgpuTextureSrc;
    srcArgs.origin.x = srcRect.fLeft;
    srcArgs.origin.y = srcRect.fTop;

    wgpu::ImageCopyTexture dstArgs;
    dstArgs.texture = wgpuTextureDst;
    dstArgs.origin.x = dstPoint.fX;
    dstArgs.origin.y = dstPoint.fY;
    dstArgs.mipLevel = mipLevel;

    wgpu::Extent3D copySize = {
            static_cast<uint32_t>(srcRect.width()), static_cast<uint32_t>(srcRect.height()), 1};

    fCommandEncoder.CopyTextureToTexture(&srcArgs, &dstArgs, &copySize);

    return true;
}

bool DawnCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) {
    // Nothing to encode: Dawn performs CPU readback via asynchronous buffer mapping, so no GPU
    // work is recorded here.
    return true;
}

bool DawnCommandBuffer::onClearBuffer(const Buffer* buffer, size_t offset, size_t size) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    auto& wgpuBuffer = static_cast<const DawnBuffer*>(buffer)->dawnBuffer();
    fCommandEncoder.ClearBuffer(wgpuBuffer, offset, size);

    return true;
}

}  // namespace skgpu::graphite