Flutter Engine
DawnCommandBuffer.cpp
/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/dawn/DawnCommandBuffer.h"

// The original listing collapsed the include block; reconstructed from the symbols used below.
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/RenderPassDesc.h"
#include "src/gpu/graphite/compute/DispatchGroup.h"
#include "src/gpu/graphite/dawn/DawnBuffer.h"
#include "src/gpu/graphite/dawn/DawnCaps.h"
#include "src/gpu/graphite/dawn/DawnComputePipeline.h"
#include "src/gpu/graphite/dawn/DawnGraphicsPipeline.h"
#include "src/gpu/graphite/dawn/DawnResourceProvider.h"
#include "src/gpu/graphite/dawn/DawnSampler.h"
#include "src/gpu/graphite/dawn/DawnSharedContext.h"
#include "src/gpu/graphite/dawn/DawnTexture.h"

namespace skgpu::graphite {

namespace {

using IntrinsicConstant = float[4];

constexpr int kBufferBindingOffsetAlignment = 256;

constexpr int kIntrinsicConstantAlignedSize =
        SkAlignTo(sizeof(IntrinsicConstant), kBufferBindingOffsetAlignment);
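// Note: 256 is WebGPU's default minUniformBufferOffsetAlignment limit, and dynamic
// uniform-buffer offsets must be multiples of it, so each constant slot is padded to
// 256 bytes even though the payload is only sizeof(float[4]) = 16 bytes.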

#if defined(__EMSCRIPTEN__)
// When running against WebGPU in WASM we don't have the wgpu::CommandEncoder::WriteBuffer method.
// We allocate a fixed-size buffer to hold the intrinsic constants. If we overflow, we allocate
// another buffer.
constexpr int kNumSlotsForIntrinsicConstantBuffer = 8;
#endif
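// (In the Emscripten path, each render pass consumes one aligned slot of that buffer; see
// preprocessViewport() below, which advances fIntrinsicConstantBufferSlotsUsed and switches
// to a fresh buffer once all kNumSlotsForIntrinsicConstantBuffer slots are written.)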

}  // namespace

std::unique_ptr<DawnCommandBuffer> DawnCommandBuffer::Make(const DawnSharedContext* sharedContext,
                                                           DawnResourceProvider* resourceProvider) {
    std::unique_ptr<DawnCommandBuffer> cmdBuffer(
            new DawnCommandBuffer(sharedContext, resourceProvider));
    if (!cmdBuffer->setNewCommandBufferResources()) {
        return {};
    }
    return cmdBuffer;
}

DawnCommandBuffer::DawnCommandBuffer(const DawnSharedContext* sharedContext,
                                     DawnResourceProvider* resourceProvider)
        : fSharedContext(sharedContext)
        , fResourceProvider(resourceProvider) {}

DawnCommandBuffer::~DawnCommandBuffer() {}

wgpu::CommandBuffer DawnCommandBuffer::finishEncoding() {
    SkASSERT(fCommandEncoder);
    wgpu::CommandBuffer cmdBuffer = fCommandEncoder.Finish();

    fCommandEncoder = nullptr;

    return cmdBuffer;
}

void DawnCommandBuffer::onResetCommandBuffer() {
    fIntrinsicConstantBuffer = nullptr;

    fActiveGraphicsPipeline = nullptr;
    fActiveRenderPassEncoder = nullptr;
    fActiveComputePassEncoder = nullptr;
    fCommandEncoder = nullptr;

    for (auto& bufferSlot : fBoundUniformBuffers) {
        bufferSlot = nullptr;
    }
    fBoundUniformBuffersDirty = true;
}

bool DawnCommandBuffer::setNewCommandBufferResources() {
    SkASSERT(!fCommandEncoder);
    fCommandEncoder = fSharedContext->device().CreateCommandEncoder();
    SkASSERT(fCommandEncoder);
    return true;
}

bool DawnCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc,
                                        const Texture* colorTexture,
                                        const Texture* resolveTexture,
                                        const Texture* depthStencilTexture,
                                        SkRect viewport,
                                        const DrawPassList& drawPasses) {
    // Update the viewport-derived intrinsic constant buffer before starting a render pass.
    this->preprocessViewport(viewport);

    if (!this->beginRenderPass(renderPassDesc, colorTexture, resolveTexture, depthStencilTexture)) {
        return false;
    }

    this->setViewport(viewport);

    for (const auto& drawPass : drawPasses) {
        if (!this->addDrawPass(drawPass.get())) SK_UNLIKELY {
            this->endRenderPass();
            return false;
        }
    }

    this->endRenderPass();
    return true;
}

bool DawnCommandBuffer::onAddComputePass(DispatchGroupSpan groups) {
    this->beginComputePass();
    for (const auto& group : groups) {
        group->addResourceRefs(this);
        for (const auto& dispatch : group->dispatches()) {
            this->bindComputePipeline(group->getPipeline(dispatch.fPipelineIndex));
            this->bindDispatchResources(*group, dispatch);
            if (const WorkgroupSize* globalSize =
                        std::get_if<WorkgroupSize>(&dispatch.fGlobalSizeOrIndirect)) {
                this->dispatchWorkgroups(*globalSize);
            } else {
                SkASSERT(std::holds_alternative<BufferView>(dispatch.fGlobalSizeOrIndirect));
                const BufferView& indirect =
                        *std::get_if<BufferView>(&dispatch.fGlobalSizeOrIndirect);
                this->dispatchWorkgroupsIndirect(indirect.fInfo.fBuffer, indirect.fInfo.fOffset);
            }
        }
    }
    this->endComputePass();
    return true;
}

bool DawnCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc,
                                        const Texture* colorTexture,
                                        const Texture* resolveTexture,
                                        const Texture* depthStencilTexture) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    constexpr static wgpu::LoadOp wgpuLoadActionMap[]{
            wgpu::LoadOp::Load,
            wgpu::LoadOp::Clear,
            wgpu::LoadOp::Clear  // Don't care
    };
    static_assert((int)LoadOp::kLoad == 0);
    static_assert((int)LoadOp::kClear == 1);
    static_assert((int)LoadOp::kDiscard == 2);
    static_assert(std::size(wgpuLoadActionMap) == kLoadOpCount);

    constexpr static wgpu::StoreOp wgpuStoreActionMap[]{wgpu::StoreOp::Store,
                                                        wgpu::StoreOp::Discard};
    static_assert((int)StoreOp::kStore == 0);
    static_assert((int)StoreOp::kDiscard == 1);
    static_assert(std::size(wgpuStoreActionMap) == kStoreOpCount);

    wgpu::RenderPassDescriptor wgpuRenderPass = {};
    wgpu::RenderPassColorAttachment wgpuColorAttachment;
    wgpu::RenderPassDepthStencilAttachment wgpuDepthStencilAttachment;

    // Set up color attachment.
#if !defined(__EMSCRIPTEN__)
    wgpu::DawnRenderPassColorAttachmentRenderToSingleSampled msaaRenderToSingleSampledDesc;
#endif

    auto& colorInfo = renderPassDesc.fColorAttachment;
    bool loadMSAAFromResolveExplicitly = false;
    if (colorTexture) {
        wgpuRenderPass.colorAttachments = &wgpuColorAttachment;
        wgpuRenderPass.colorAttachmentCount = 1;

        // TODO: check Texture matches RenderPassDesc
        const auto* dawnColorTexture = static_cast<const DawnTexture*>(colorTexture);
        SkASSERT(dawnColorTexture->renderTextureView());
        wgpuColorAttachment.view = dawnColorTexture->renderTextureView();

        const std::array<float, 4>& clearColor = renderPassDesc.fClearColor;
        wgpuColorAttachment.clearValue = {
                clearColor[0], clearColor[1], clearColor[2], clearColor[3]};
        wgpuColorAttachment.loadOp = wgpuLoadActionMap[static_cast<int>(colorInfo.fLoadOp)];
        wgpuColorAttachment.storeOp = wgpuStoreActionMap[static_cast<int>(colorInfo.fStoreOp)];

        // Set up resolve attachment
        if (resolveTexture) {
            SkASSERT(renderPassDesc.fColorResolveAttachment.fStoreOp == StoreOp::kStore);
            // TODO: check Texture matches RenderPassDesc
            const auto* dawnResolveTexture = static_cast<const DawnTexture*>(resolveTexture);
            SkASSERT(dawnResolveTexture->renderTextureView());
            wgpuColorAttachment.resolveTarget = dawnResolveTexture->renderTextureView();

            // Inclusion of a resolve texture implies the client wants to finish the
            // render pass with a resolve.
            SkASSERT(wgpuColorAttachment.storeOp == wgpu::StoreOp::Discard);

            // But it also means we have to load the resolve texture into the MSAA color attachment
            if (renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad) {
                std::optional<wgpu::LoadOp> resolveLoadOp =
                        fSharedContext->dawnCaps()->resolveTextureLoadOp();
                if (resolveLoadOp.has_value()) {
                    wgpuColorAttachment.loadOp = *resolveLoadOp;
                } else {
                    // No Dawn built-in support; we need to load the resolve texture manually.
                    loadMSAAFromResolveExplicitly = true;
                }
            }
            // TODO: If the color resolve texture is read-only we can use a private (vs. memoryless)
            // msaa attachment that's coupled to the framebuffer and the StoreAndMultisampleResolve
            // action instead of loading as a draw.
        } else {
            [[maybe_unused]] bool isMSAAToSingleSampled =
                    renderPassDesc.fSampleCount > 1 && colorTexture->numSamples() == 1;
#if defined(__EMSCRIPTEN__)
            SkASSERT(!isMSAAToSingleSampled);
#else
            if (isMSAAToSingleSampled) {
                // If the render pass is multisampled but the color attachment is single sampled,
                // we need to activate the multisampled-render-to-single-sampled feature for this
                // render pass.
                SkASSERT(fSharedContext->device().HasFeature(
                        wgpu::FeatureName::MSAARenderToSingleSampled));

                wgpuColorAttachment.nextInChain = &msaaRenderToSingleSampledDesc;
                msaaRenderToSingleSampledDesc.implicitSampleCount = renderPassDesc.fSampleCount;
            }
#endif
        }
    }
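    // At this point the color attachment covers one of three MSAA arrangements set up above:
    // a Dawn-provided load op for expanding the resolve texture (resolveTextureLoadOp()),
    // an explicit blit from the resolve texture (loadMSAAFromResolveExplicitly), or
    // MSAARenderToSingleSampled with an implicitly multisampled attachment.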

    // Set up stencil/depth attachment
    auto& depthStencilInfo = renderPassDesc.fDepthStencilAttachment;
    if (depthStencilTexture) {
        const auto* dawnDepthStencilTexture = static_cast<const DawnTexture*>(depthStencilTexture);
        auto format = dawnDepthStencilTexture->textureInfo().dawnTextureSpec().getViewFormat();
        SkASSERT(DawnFormatIsDepthOrStencil(format));

        // TODO: check Texture matches RenderPassDesc
        SkASSERT(dawnDepthStencilTexture->renderTextureView());
        wgpuDepthStencilAttachment.view = dawnDepthStencilTexture->renderTextureView();

        if (DawnFormatIsDepth(format)) {
            wgpuDepthStencilAttachment.depthClearValue = renderPassDesc.fClearDepth;
            wgpuDepthStencilAttachment.depthLoadOp =
                    wgpuLoadActionMap[static_cast<int>(depthStencilInfo.fLoadOp)];
            wgpuDepthStencilAttachment.depthStoreOp =
                    wgpuStoreActionMap[static_cast<int>(depthStencilInfo.fStoreOp)];
        }

        if (DawnFormatIsStencil(format)) {
            wgpuDepthStencilAttachment.stencilClearValue = renderPassDesc.fClearStencil;
            wgpuDepthStencilAttachment.stencilLoadOp =
                    wgpuLoadActionMap[static_cast<int>(depthStencilInfo.fLoadOp)];
            wgpuDepthStencilAttachment.stencilStoreOp =
                    wgpuStoreActionMap[static_cast<int>(depthStencilInfo.fStoreOp)];
        }

        wgpuRenderPass.depthStencilAttachment = &wgpuDepthStencilAttachment;
    } else {
        SkASSERT(!depthStencilInfo.fTextureInfo.isValid());
    }

    if (loadMSAAFromResolveExplicitly) {
        // Manually load the contents of the resolve texture into the MSAA attachment as a draw,
        // so the actual load op for the MSAA attachment had better have been discard.
        if (!this->loadMSAAFromResolveAndBeginRenderPassEncoder(
                    renderPassDesc,
                    wgpuRenderPass,
                    static_cast<const DawnTexture*>(colorTexture))) {
            return false;
        }
    } else {
        fActiveRenderPassEncoder = fCommandEncoder.BeginRenderPass(&wgpuRenderPass);
    }

    return true;
}

bool DawnCommandBuffer::loadMSAAFromResolveAndBeginRenderPassEncoder(
        const RenderPassDesc& frontendRenderPassDesc,
        const wgpu::RenderPassDescriptor& wgpuRenderPassDesc,
        const DawnTexture* msaaTexture) {
    SkASSERT(!fActiveRenderPassEncoder);

    // Copy from the resolve texture to an intermediate texture. We blit with a draw
    // pipeline because the resolve texture might be created from a swapchain, and it
    // is possible that only its texture view is available. So onCopyTextureToTexture(),
    // which operates on wgpu::Texture instead of wgpu::TextureView, cannot be used in that case.
    auto msaaLoadTexture = fResourceProvider->findOrCreateDiscardableMSAALoadTexture(
            msaaTexture->dimensions(), msaaTexture->textureInfo());
    if (!msaaLoadTexture) {
        SKGPU_LOG_E("DawnCommandBuffer::loadMSAAFromResolveAndBeginRenderPassEncoder: "
                    "Can't create MSAA Load Texture.");
        return false;
    }

    this->trackCommandBufferResource(msaaLoadTexture);

    // Create an intermediate render pass (copy from resolve texture -> MSAA load texture).
    RenderPassDesc intermediateRenderPassDesc = {};
    intermediateRenderPassDesc.fColorAttachment.fLoadOp = LoadOp::kDiscard;
    intermediateRenderPassDesc.fColorAttachment.fStoreOp = StoreOp::kStore;
    intermediateRenderPassDesc.fColorAttachment.fTextureInfo =
            frontendRenderPassDesc.fColorResolveAttachment.fTextureInfo;

    wgpu::RenderPassColorAttachment wgpuIntermediateColorAttachment;
    // Dawn doesn't support a true DontCare load op, so use LoadOp::Clear.
    wgpuIntermediateColorAttachment.loadOp = wgpu::LoadOp::Clear;
    wgpuIntermediateColorAttachment.clearValue = {1, 1, 1, 1};
    wgpuIntermediateColorAttachment.storeOp = wgpu::StoreOp::Store;
    wgpuIntermediateColorAttachment.view = msaaLoadTexture->renderTextureView();

    wgpu::RenderPassDescriptor wgpuIntermediateRenderPassDesc;
    wgpuIntermediateRenderPassDesc.colorAttachmentCount = 1;
    wgpuIntermediateRenderPassDesc.colorAttachments = &wgpuIntermediateColorAttachment;

    auto renderPassEncoder = fCommandEncoder.BeginRenderPass(&wgpuIntermediateRenderPassDesc);

    bool blitSucceeded = this->doBlitWithDraw(
            renderPassEncoder,
            intermediateRenderPassDesc,
            /*sourceTextureView=*/wgpuRenderPassDesc.colorAttachments[0].resolveTarget,
            msaaTexture->dimensions().width(),
            msaaTexture->dimensions().height());

    renderPassEncoder.End();

    if (!blitSucceeded) {
        return false;
    }

    // Start the actual render pass (blit from MSAA load texture -> MSAA texture).
    renderPassEncoder = fCommandEncoder.BeginRenderPass(&wgpuRenderPassDesc);

    if (!this->doBlitWithDraw(renderPassEncoder,
                              frontendRenderPassDesc,
                              /*sourceTextureView=*/msaaLoadTexture->renderTextureView(),
                              msaaTexture->dimensions().width(),
                              msaaTexture->dimensions().height())) {
        renderPassEncoder.End();
        return false;
    }

    fActiveRenderPassEncoder = renderPassEncoder;

    return true;
}

bool DawnCommandBuffer::doBlitWithDraw(const wgpu::RenderPassEncoder& renderEncoder,
                                       const RenderPassDesc& frontendRenderPassDesc,
                                       const wgpu::TextureView& sourceTextureView,
                                       int width,
                                       int height) {
    auto loadPipeline = fResourceProvider->findOrCreateBlitWithDrawPipeline(frontendRenderPassDesc);
    if (!loadPipeline) {
        SKGPU_LOG_E("Unable to create pipeline to blit with draw");
        return false;
    }

    SkASSERT(renderEncoder);

    renderEncoder.SetPipeline(loadPipeline);

    // The load MSAA pipeline takes no uniforms and no vertex/instance attributes, and it uses
    // only one texture, which does not require a sampler.

    // TODO: b/260368758
    // Cache the single texture's bind group creation.
    wgpu::BindGroupEntry entry;
    entry.binding = 0;
    entry.textureView = sourceTextureView;

    wgpu::BindGroupDescriptor desc;
    desc.layout = loadPipeline.GetBindGroupLayout(0);
    desc.entryCount = 1;
    desc.entries = &entry;

    auto bindGroup = fSharedContext->device().CreateBindGroup(&desc);

    renderEncoder.SetBindGroup(0, bindGroup);

    renderEncoder.SetScissorRect(0, 0, width, height);
    renderEncoder.SetViewport(0, 0, width, height, 0, 1);

    // Fullscreen triangle
    renderEncoder.Draw(3);
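    // The blit pipeline's vertex shader is presumably deriving a single screen-covering
    // triangle from the vertex index (hence no vertex buffer and exactly 3 vertices);
    // the scissor rect above clips it to the target size.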

    return true;
}

void DawnCommandBuffer::endRenderPass() {
    SkASSERT(fActiveRenderPassEncoder);
    fActiveRenderPassEncoder.End();
    fActiveRenderPassEncoder = nullptr;
}

bool DawnCommandBuffer::addDrawPass(const DrawPass* drawPass) {
    drawPass->addResourceRefs(this);
    for (auto [type, cmdPtr] : drawPass->commands()) {
        switch (type) {
            case DrawPassCommands::Type::kBindGraphicsPipeline: {
                auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr);
                if (!this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex)))
                    SK_UNLIKELY { return false; }
                break;
            }
            case DrawPassCommands::Type::kSetBlendConstants: {
                auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr);
                this->setBlendConstants(sbc->fBlendConstants);
                break;
            }
            case DrawPassCommands::Type::kBindUniformBuffer: {
                auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr);
                this->bindUniformBuffer(bub->fInfo, bub->fSlot);
                break;
            }
            case DrawPassCommands::Type::kBindDrawBuffers: {
                auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr);
                this->bindDrawBuffers(
                        bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect);
                break;
            }
            case DrawPassCommands::Type::kBindTexturesAndSamplers: {
                auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr);
                this->bindTextureAndSamplers(*drawPass, *bts);
                break;
            }
            case DrawPassCommands::Type::kSetScissor: {
                auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr);
                const SkIRect& rect = ss->fScissor;
                this->setScissor(rect.fLeft, rect.fTop, rect.width(), rect.height());
                break;
            }
            case DrawPassCommands::Type::kDraw: {
                auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr);
                this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexed: {
                auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr);
                this->drawIndexed(
                        draw->fType, draw->fBaseIndex, draw->fIndexCount, draw->fBaseVertex);
                break;
            }
            case DrawPassCommands::Type::kDrawInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr);
                this->drawInstanced(draw->fType,
                                    draw->fBaseVertex,
                                    draw->fVertexCount,
                                    draw->fBaseInstance,
                                    draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr);
                this->drawIndexedInstanced(draw->fType,
                                           draw->fBaseIndex,
                                           draw->fIndexCount,
                                           draw->fBaseVertex,
                                           draw->fBaseInstance,
                                           draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr);
                this->drawIndirect(draw->fType);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr);
                this->drawIndexedIndirect(draw->fType);
                break;
            }
        }
    }

    return true;
}

bool DawnCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) {
    SkASSERT(fActiveRenderPassEncoder);

    auto* dawnGraphicsPipeline = static_cast<const DawnGraphicsPipeline*>(graphicsPipeline);
    auto& wgpuPipeline = dawnGraphicsPipeline->dawnRenderPipeline();
    if (!wgpuPipeline) SK_UNLIKELY {
        return false;
    }
    fActiveGraphicsPipeline = dawnGraphicsPipeline;
    fActiveRenderPassEncoder.SetPipeline(wgpuPipeline);
    fBoundUniformBuffersDirty = true;

    return true;
}

void DawnCommandBuffer::bindUniformBuffer(const BindUniformBufferInfo& info, UniformSlot slot) {
    SkASSERT(fActiveRenderPassEncoder);

    auto dawnBuffer = static_cast<const DawnBuffer*>(info.fBuffer);

    unsigned int bufferIndex = 0;
    switch (slot) {
        case UniformSlot::kRenderStep:
            bufferIndex = DawnGraphicsPipeline::kRenderStepUniformBufferIndex;
            break;
        case UniformSlot::kPaint:
            bufferIndex = DawnGraphicsPipeline::kPaintUniformBufferIndex;
            break;
        case UniformSlot::kGradient:
            bufferIndex = DawnGraphicsPipeline::kGradientBufferIndex;
            break;
        default:
            SkASSERT(false);
    }

    fBoundUniformBuffers[bufferIndex] = dawnBuffer;
    fBoundUniformBufferOffsets[bufferIndex] = static_cast<uint32_t>(info.fOffset);
    fBoundUniformBufferSizes[bufferIndex] = info.fBindingSize;

    fBoundUniformBuffersDirty = true;
}

void DawnCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices,
                                        const BindBufferInfo& instances,
                                        const BindBufferInfo& indices,
                                        const BindBufferInfo& indirect) {
    SkASSERT(fActiveRenderPassEncoder);

    if (vertices.fBuffer) {
        auto dawnBuffer = static_cast<const DawnBuffer*>(vertices.fBuffer)->dawnBuffer();
        fActiveRenderPassEncoder.SetVertexBuffer(
                DawnGraphicsPipeline::kVertexBufferIndex, dawnBuffer, vertices.fOffset);
    }
    if (instances.fBuffer) {
        auto dawnBuffer = static_cast<const DawnBuffer*>(instances.fBuffer)->dawnBuffer();
        fActiveRenderPassEncoder.SetVertexBuffer(
                DawnGraphicsPipeline::kInstanceBufferIndex, dawnBuffer, instances.fOffset);
    }
    if (indices.fBuffer) {
        auto dawnBuffer = static_cast<const DawnBuffer*>(indices.fBuffer)->dawnBuffer();
        fActiveRenderPassEncoder.SetIndexBuffer(
                dawnBuffer, wgpu::IndexFormat::Uint16, indices.fOffset);
    }
    if (indirect.fBuffer) {
        fCurrentIndirectBuffer = static_cast<const DawnBuffer*>(indirect.fBuffer)->dawnBuffer();
        fCurrentIndirectBufferOffset = indirect.fOffset;
    } else {
        fCurrentIndirectBuffer = nullptr;
        fCurrentIndirectBufferOffset = 0;
    }
}
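// Note: bindDrawBuffers() only latches the indirect buffer; drawIndirect() and
// drawIndexedIndirect() consume fCurrentIndirectBuffer/fCurrentIndirectBufferOffset
// when the indirect draw is recorded.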

void DawnCommandBuffer::bindTextureAndSamplers(
        const DrawPass& drawPass, const DrawPassCommands::BindTexturesAndSamplers& command) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline);

    wgpu::BindGroup bindGroup;
    if (command.fNumTexSamplers == 1) {
        // Optimize for the common single-texture case.
        SkASSERT(fActiveGraphicsPipeline->numTexturesAndSamplers() == 2);

        const auto* texture =
                static_cast<const DawnTexture*>(drawPass.getTexture(command.fTextureIndices[0]));
        const auto* sampler =
                static_cast<const DawnSampler*>(drawPass.getSampler(command.fSamplerIndices[0]));

        bindGroup = fResourceProvider->findOrCreateSingleTextureSamplerBindGroup(sampler, texture);
    } else {
        std::vector<wgpu::BindGroupEntry> entries(2 * command.fNumTexSamplers);

        for (int i = 0; i < command.fNumTexSamplers; ++i) {
            const auto* texture = static_cast<const DawnTexture*>(
                    drawPass.getTexture(command.fTextureIndices[i]));
            const auto* sampler = static_cast<const DawnSampler*>(
                    drawPass.getSampler(command.fSamplerIndices[i]));
            auto& wgpuTextureView = texture->sampleTextureView();
            auto& wgpuSampler = sampler->dawnSampler();

            // The shader generator is assumed to assign binding slots in sampler/texture
            // pairs: a sampler, then its texture, then the next sampler and texture, and
            // so on. So 2 * i is the base binding index of the i-th sampler/texture pair.
            // TODO: https://b.corp.google.com/issues/259457090:
            // Better configurable way of assigning samplers and textures' bindings.
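            // For example, with fNumTexSamplers == 2 the bindings are:
            //   0 = sampler0, 1 = texture0, 2 = sampler1, 3 = texture1.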
            entries[2 * i].binding = 2 * i;
            entries[2 * i].sampler = wgpuSampler;

            entries[2 * i + 1].binding = 2 * i + 1;
            entries[2 * i + 1].textureView = wgpuTextureView;
        }

        wgpu::BindGroupDescriptor desc;
        const auto& groupLayouts = fActiveGraphicsPipeline->dawnGroupLayouts();
        desc.layout = groupLayouts[DawnGraphicsPipeline::kTextureBindGroupIndex];
        desc.entryCount = entries.size();
        desc.entries = entries.data();

        bindGroup = fSharedContext->device().CreateBindGroup(&desc);
    }

    fActiveRenderPassEncoder.SetBindGroup(DawnGraphicsPipeline::kTextureBindGroupIndex, bindGroup);
}

void DawnCommandBuffer::syncUniformBuffers() {
    if (fBoundUniformBuffersDirty) {
        fBoundUniformBuffersDirty = false;

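        // Four dynamic-offset slots in the uniform bind group: [0] intrinsic constants,
        // [1] RenderStep uniforms, [2] paint uniforms, [3] gradient buffer. Unused slots
        // get a null buffer and a zero offset.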
        std::array<uint32_t, 4> dynamicOffsets;
        std::array<std::pair<const DawnBuffer*, uint32_t>, 4> boundBuffersAndSizes;
        boundBuffersAndSizes[0].first = fIntrinsicConstantBuffer.get();
        boundBuffersAndSizes[0].second = sizeof(IntrinsicConstant);

        int activeIntrinsicBufferSlot = fIntrinsicConstantBufferSlotsUsed - 1;
        dynamicOffsets[0] = activeIntrinsicBufferSlot * kIntrinsicConstantAlignedSize;

        if (fActiveGraphicsPipeline->hasStepUniforms() &&
            fBoundUniformBuffers[DawnGraphicsPipeline::kRenderStepUniformBufferIndex]) {
            boundBuffersAndSizes[1].first =
                    fBoundUniformBuffers[DawnGraphicsPipeline::kRenderStepUniformBufferIndex];
            boundBuffersAndSizes[1].second =
                    fBoundUniformBufferSizes[DawnGraphicsPipeline::kRenderStepUniformBufferIndex];
            dynamicOffsets[1] =
                    fBoundUniformBufferOffsets[DawnGraphicsPipeline::kRenderStepUniformBufferIndex];
        } else {
            // Unused buffer entry
            boundBuffersAndSizes[1].first = nullptr;
            dynamicOffsets[1] = 0;
        }

        if (fActiveGraphicsPipeline->hasPaintUniforms() &&
            fBoundUniformBuffers[DawnGraphicsPipeline::kPaintUniformBufferIndex]) {
            boundBuffersAndSizes[2].first =
                    fBoundUniformBuffers[DawnGraphicsPipeline::kPaintUniformBufferIndex];
            boundBuffersAndSizes[2].second =
                    fBoundUniformBufferSizes[DawnGraphicsPipeline::kPaintUniformBufferIndex];
            dynamicOffsets[2] =
                    fBoundUniformBufferOffsets[DawnGraphicsPipeline::kPaintUniformBufferIndex];
        } else {
            // Unused buffer entry
            boundBuffersAndSizes[2].first = nullptr;
            dynamicOffsets[2] = 0;
        }

        if (fActiveGraphicsPipeline->hasGradientBuffer() &&
            fBoundUniformBuffers[DawnGraphicsPipeline::kGradientBufferIndex]) {
            boundBuffersAndSizes[3].first =
                    fBoundUniformBuffers[DawnGraphicsPipeline::kGradientBufferIndex];
            boundBuffersAndSizes[3].second =
                    fBoundUniformBufferSizes[DawnGraphicsPipeline::kGradientBufferIndex];
            dynamicOffsets[3] =
                    fBoundUniformBufferOffsets[DawnGraphicsPipeline::kGradientBufferIndex];
        } else {
            // Unused buffer entry
            boundBuffersAndSizes[3].first = nullptr;
            dynamicOffsets[3] = 0;
        }

        auto bindGroup =
                fResourceProvider->findOrCreateUniformBuffersBindGroup(boundBuffersAndSizes);

        fActiveRenderPassEncoder.SetBindGroup(DawnGraphicsPipeline::kUniformBufferBindGroupIndex,
                                              bindGroup,
                                              dynamicOffsets.size(),
                                              dynamicOffsets.data());
    }
}

void DawnCommandBuffer::setScissor(unsigned int left,
                                   unsigned int top,
                                   unsigned int width,
                                   unsigned int height) {
    SkASSERT(fActiveRenderPassEncoder);
    SkIRect scissor = SkIRect::MakeXYWH(
            left + fReplayTranslation.x(), top + fReplayTranslation.y(), width, height);
    if (!scissor.intersect(SkIRect::MakeSize(fRenderPassSize))) {
        scissor.setEmpty();
    }
    fActiveRenderPassEncoder.SetScissorRect(
            scissor.x(), scissor.y(), scissor.width(), scissor.height());
}

void DawnCommandBuffer::preprocessViewport(const SkRect& viewport) {
    // Dawn's framebuffer space has (0, 0) at the top left. This agrees with Skia's device coords.
    // However, in NDC (-1, -1) is the bottom left. So we flip the origin here (assuming all
    // surfaces we have are TopLeft origin).
    const float x = viewport.x() - fReplayTranslation.x();
    const float y = viewport.y() - fReplayTranslation.y();
    const float invTwoW = 2.f / viewport.width();
    const float invTwoH = 2.f / viewport.height();
    const IntrinsicConstant rtAdjust = {invTwoW, -invTwoH, -1.f - x * invTwoW, 1.f + y * invTwoH};
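    // The resulting mapping for a device-space position d is
    //   ndc.x = d.x * rtAdjust[0] + rtAdjust[2] = (d.x - x) * 2/w - 1
    //   ndc.y = d.y * rtAdjust[1] + rtAdjust[3] = 1 - (d.y - y) * 2/h
    // i.e. [x, x+w] maps onto [-1, 1] and [y, y+h] onto [1, -1], flipping the y origin.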

    // TODO: https://b.corp.google.com/issues/259267703
    // Make updating intrinsic constants faster. Metal has a setVertexBytes method for quickly
    // sending intrinsic constants to the vertex shader without any buffer, but Dawn has no
    // similar capability. So we have to use WriteBuffer(), and that method is not allowed to
    // be called when there is an active render pass.
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

#if !defined(__EMSCRIPTEN__)
    if (!fIntrinsicConstantBuffer) {
        fIntrinsicConstantBuffer = fResourceProvider->getOrCreateIntrinsicConstantBuffer();
        SkASSERT(fIntrinsicConstantBuffer);
        SkASSERT(fIntrinsicConstantBuffer->size() == sizeof(IntrinsicConstant));
        this->trackResource(fIntrinsicConstantBuffer);
    }
    fCommandEncoder.WriteBuffer(fIntrinsicConstantBuffer->dawnBuffer(),
                                0,
                                reinterpret_cast<const uint8_t*>(rtAdjust),
                                sizeof(rtAdjust));
    fIntrinsicConstantBufferSlotsUsed = 1;
#else  // defined(__EMSCRIPTEN__)
    if (!fIntrinsicConstantBuffer ||
        fIntrinsicConstantBufferSlotsUsed == kNumSlotsForIntrinsicConstantBuffer) {
        size_t bufferSize = kIntrinsicConstantAlignedSize * kNumSlotsForIntrinsicConstantBuffer;
        fIntrinsicConstantBuffer =
                fResourceProvider->findOrCreateDawnBuffer(bufferSize,
                                                          BufferType::kUniform,
                                                          AccessPattern::kGpuOnly,
                                                          "IntrinsicConstantBuffer");

        fIntrinsicConstantBufferSlotsUsed = 0;
        SkASSERT(fIntrinsicConstantBuffer);
        this->trackResource(fIntrinsicConstantBuffer);
    }
    uint64_t offset = fIntrinsicConstantBufferSlotsUsed * kIntrinsicConstantAlignedSize;
    fSharedContext->queue().WriteBuffer(
            fIntrinsicConstantBuffer->dawnBuffer(), offset, &rtAdjust, sizeof(rtAdjust));
    fIntrinsicConstantBufferSlotsUsed++;
#endif  // defined(__EMSCRIPTEN__)
}

void DawnCommandBuffer::setViewport(const SkRect& viewport) {
    SkASSERT(fActiveRenderPassEncoder);
    fActiveRenderPassEncoder.SetViewport(
            viewport.x(), viewport.y(), viewport.width(), viewport.height(), 0, 1);
}

void DawnCommandBuffer::setBlendConstants(float* blendConstants) {
    SkASSERT(fActiveRenderPassEncoder);
    wgpu::Color blendConst = {
            blendConstants[0], blendConstants[1], blendConstants[2], blendConstants[3]};
    fActiveRenderPassEncoder.SetBlendConstant(&blendConst);
}

void DawnCommandBuffer::draw(PrimitiveType type,
                             unsigned int baseVertex,
                             unsigned int vertexCount) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.Draw(vertexCount, /*instanceCount=*/1, baseVertex);
}

void DawnCommandBuffer::drawIndexed(PrimitiveType type,
                                    unsigned int baseIndex,
                                    unsigned int indexCount,
                                    unsigned int baseVertex) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.DrawIndexed(indexCount, /*instanceCount=*/1, baseIndex, baseVertex);
}

void DawnCommandBuffer::drawInstanced(PrimitiveType type,
                                      unsigned int baseVertex,
                                      unsigned int vertexCount,
                                      unsigned int baseInstance,
                                      unsigned int instanceCount) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.Draw(vertexCount, instanceCount, baseVertex, baseInstance);
}

void DawnCommandBuffer::drawIndexedInstanced(PrimitiveType type,
                                             unsigned int baseIndex,
                                             unsigned int indexCount,
                                             unsigned int baseVertex,
                                             unsigned int baseInstance,
                                             unsigned int instanceCount) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.DrawIndexed(
            indexCount, instanceCount, baseIndex, baseVertex, baseInstance);
}

void DawnCommandBuffer::drawIndirect(PrimitiveType type) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);
    SkASSERT(fCurrentIndirectBuffer);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.DrawIndirect(fCurrentIndirectBuffer, fCurrentIndirectBufferOffset);
}

void DawnCommandBuffer::drawIndexedIndirect(PrimitiveType type) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);
    SkASSERT(fCurrentIndirectBuffer);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.DrawIndexedIndirect(fCurrentIndirectBuffer,
                                                 fCurrentIndirectBufferOffset);
}

void DawnCommandBuffer::beginComputePass() {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);
    fActiveComputePassEncoder = fCommandEncoder.BeginComputePass();
}

void DawnCommandBuffer::bindComputePipeline(const ComputePipeline* computePipeline) {
    SkASSERT(fActiveComputePassEncoder);

    fActiveComputePipeline = static_cast<const DawnComputePipeline*>(computePipeline);
    fActiveComputePassEncoder.SetPipeline(fActiveComputePipeline->dawnComputePipeline());
}

void DawnCommandBuffer::bindDispatchResources(const DispatchGroup& group,
                                              const DispatchGroup::Dispatch& dispatch) {
    SkASSERT(fActiveComputePassEncoder);
    SkASSERT(fActiveComputePipeline);

    // Bind all pipeline resources to a single new bind group at index 0.
    // NOTE: Caching the bind groups here might be beneficial based on the layout and the bound
    // resources (though it's questionable how often a bind group will end up getting reused since
    // the bound objects change often).
    skia_private::TArray<wgpu::BindGroupEntry> entries;
    entries.reserve(dispatch.fBindings.size());

    for (const ResourceBinding& binding : dispatch.fBindings) {
        wgpu::BindGroupEntry& entry = entries.push_back();
        entry.binding = binding.fIndex;
        if (const BufferView* buffer = std::get_if<BufferView>(&binding.fResource)) {
            entry.buffer = static_cast<const DawnBuffer*>(buffer->fInfo.fBuffer)->dawnBuffer();
            entry.offset = buffer->fInfo.fOffset;
            entry.size = buffer->fSize;
        } else if (const TextureIndex* texIdx = std::get_if<TextureIndex>(&binding.fResource)) {
            const DawnTexture* texture =
                    static_cast<const DawnTexture*>(group.getTexture(texIdx->fValue));
            SkASSERT(texture);
            entry.textureView = texture->sampleTextureView();
        } else if (const SamplerIndex* samplerIdx = std::get_if<SamplerIndex>(&binding.fResource)) {
            const DawnSampler* sampler =
                    static_cast<const DawnSampler*>(group.getSampler(samplerIdx->fValue));
            entry.sampler = sampler->dawnSampler();
        } else {
            SK_ABORT("unsupported dispatch resource type");
        }
    }

    wgpu::BindGroupDescriptor desc;
    desc.layout = fActiveComputePipeline->dawnGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    auto bindGroup = fSharedContext->device().CreateBindGroup(&desc);
    fActiveComputePassEncoder.SetBindGroup(0, bindGroup);
}

void DawnCommandBuffer::dispatchWorkgroups(const WorkgroupSize& globalSize) {
    SkASSERT(fActiveComputePassEncoder);
    SkASSERT(fActiveComputePipeline);

    fActiveComputePassEncoder.DispatchWorkgroups(
            globalSize.fWidth, globalSize.fHeight, globalSize.fDepth);
}

void DawnCommandBuffer::dispatchWorkgroupsIndirect(const Buffer* indirectBuffer,
                                                   size_t indirectBufferOffset) {
    SkASSERT(fActiveComputePassEncoder);
    SkASSERT(fActiveComputePipeline);

    auto& wgpuIndirectBuffer = static_cast<const DawnBuffer*>(indirectBuffer)->dawnBuffer();
    fActiveComputePassEncoder.DispatchWorkgroupsIndirect(wgpuIndirectBuffer, indirectBufferOffset);
}

void DawnCommandBuffer::endComputePass() {
    SkASSERT(fActiveComputePassEncoder);
    fActiveComputePassEncoder.End();
    fActiveComputePassEncoder = nullptr;
}

bool DawnCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer,
                                             size_t srcOffset,
                                             const Buffer* dstBuffer,
                                             size_t dstOffset,
                                             size_t size) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    auto& wgpuBufferSrc = static_cast<const DawnBuffer*>(srcBuffer)->dawnBuffer();
    auto& wgpuBufferDst = static_cast<const DawnBuffer*>(dstBuffer)->dawnBuffer();

    fCommandEncoder.CopyBufferToBuffer(wgpuBufferSrc, srcOffset, wgpuBufferDst, dstOffset, size);
    return true;
}

bool DawnCommandBuffer::onCopyTextureToBuffer(const Texture* texture,
                                              SkIRect srcRect,
                                              const Buffer* buffer,
                                              size_t bufferOffset,
                                              size_t bufferRowBytes) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    const auto* wgpuTexture = static_cast<const DawnTexture*>(texture);
    auto& wgpuBuffer = static_cast<const DawnBuffer*>(buffer)->dawnBuffer();

    wgpu::ImageCopyTexture src;
    src.texture = wgpuTexture->dawnTexture();
    src.origin.x = srcRect.x();
    src.origin.y = srcRect.y();
    src.aspect = wgpuTexture->textureInfo().dawnTextureSpec().fAspect;

    wgpu::ImageCopyBuffer dst;
    dst.buffer = wgpuBuffer;
    dst.layout.offset = bufferOffset;
    // Dawn requires bytesPerRow to be a multiple of 256.
    // https://b.corp.google.com/issues/259264489
    SkASSERT((bufferRowBytes & 0xFF) == 0);
    dst.layout.bytesPerRow = bufferRowBytes;

    wgpu::Extent3D copySize = {
            static_cast<uint32_t>(srcRect.width()), static_cast<uint32_t>(srcRect.height()), 1};
    fCommandEncoder.CopyTextureToBuffer(&src, &dst, &copySize);

    return true;
}

bool DawnCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
                                              const Texture* texture,
                                              const BufferTextureCopyData* copyData,
                                              int count) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    auto& wgpuTexture = static_cast<const DawnTexture*>(texture)->dawnTexture();
    auto& wgpuBuffer = static_cast<const DawnBuffer*>(buffer)->dawnBuffer();

    wgpu::ImageCopyBuffer src;
    src.buffer = wgpuBuffer;

    wgpu::ImageCopyTexture dst;
    dst.texture = wgpuTexture;

    for (int i = 0; i < count; ++i) {
        src.layout.offset = copyData[i].fBufferOffset;
        // Dawn requires bytesPerRow to be a multiple of 256.
        // https://b.corp.google.com/issues/259264489
        SkASSERT((copyData[i].fBufferRowBytes & 0xFF) == 0);
        src.layout.bytesPerRow = copyData[i].fBufferRowBytes;

        dst.origin.x = copyData[i].fRect.x();
        dst.origin.y = copyData[i].fRect.y();
        dst.mipLevel = copyData[i].fMipLevel;

        wgpu::Extent3D copySize = {static_cast<uint32_t>(copyData[i].fRect.width()),
                                   static_cast<uint32_t>(copyData[i].fRect.height()),
                                   1};
        fCommandEncoder.CopyBufferToTexture(&src, &dst, &copySize);
    }

    return true;
}

bool DawnCommandBuffer::onCopyTextureToTexture(const Texture* src,
                                               SkIRect srcRect,
                                               const Texture* dst,
                                               SkIPoint dstPoint,
                                               int mipLevel) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    auto& wgpuTextureSrc = static_cast<const DawnTexture*>(src)->dawnTexture();
    auto& wgpuTextureDst = static_cast<const DawnTexture*>(dst)->dawnTexture();

    wgpu::ImageCopyTexture srcArgs;
    srcArgs.texture = wgpuTextureSrc;
    srcArgs.origin.x = srcRect.fLeft;
    srcArgs.origin.y = srcRect.fTop;

    wgpu::ImageCopyTexture dstArgs;
    dstArgs.texture = wgpuTextureDst;
    dstArgs.origin.x = dstPoint.fX;
    dstArgs.origin.y = dstPoint.fY;
    dstArgs.mipLevel = mipLevel;

    wgpu::Extent3D copySize = {
            static_cast<uint32_t>(srcRect.width()), static_cast<uint32_t>(srcRect.height()), 1};

    fCommandEncoder.CopyTextureToTexture(&srcArgs, &dstArgs, &copySize);

    return true;
}

bool DawnCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) {
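    // Nothing to record here: with Dawn, CPU readback is presumably handled by mapping
    // the buffer asynchronously after submission rather than by extra GPU commands.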
    return true;
}

bool DawnCommandBuffer::onClearBuffer(const Buffer* buffer, size_t offset, size_t size) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    auto& wgpuBuffer = static_cast<const DawnBuffer*>(buffer)->dawnBuffer();
    fCommandEncoder.ClearBuffer(wgpuBuffer, offset, size);

    return true;
}

}  // namespace skgpu::graphite