Flutter Engine
MtlCommandBuffer.mm
/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

namespace skgpu::graphite {

std::unique_ptr<MtlCommandBuffer> MtlCommandBuffer::Make(id<MTLCommandQueue> queue,
                                                         const MtlSharedContext* sharedContext,
                                                         MtlResourceProvider* resourceProvider) {
    auto commandBuffer = std::unique_ptr<MtlCommandBuffer>(
            new MtlCommandBuffer(queue, sharedContext, resourceProvider));
    if (!commandBuffer) {
        return nullptr;
    }
    if (!commandBuffer->createNewMTLCommandBuffer()) {
        return nullptr;
    }
    return commandBuffer;
}

MtlCommandBuffer::MtlCommandBuffer(id<MTLCommandQueue> queue,
                                   const MtlSharedContext* sharedContext,
                                   MtlResourceProvider* resourceProvider)
        : fQueue(queue)
        , fSharedContext(sharedContext)
        , fResourceProvider(resourceProvider) {}

MtlCommandBuffer::~MtlCommandBuffer() {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    SkASSERT(!fActiveBlitCommandEncoder);
}

bool MtlCommandBuffer::setNewCommandBufferResources() {
    return this->createNewMTLCommandBuffer();
}

bool MtlCommandBuffer::createNewMTLCommandBuffer() {
    SkASSERT(fCommandBuffer == nil);

    // Inserting a pool here so the autorelease occurs when we return and the
    // only remaining ref is the retain below.
    @autoreleasepool {
        if (@available(macOS 11.0, iOS 14.0, tvOS 14.0, *)) {
            sk_cfp<MTLCommandBufferDescriptor*> desc([[MTLCommandBufferDescriptor alloc] init]);
            (*desc).retainedReferences = NO;
#ifdef SK_ENABLE_MTL_DEBUG_INFO
            (*desc).errorOptions = MTLCommandBufferErrorOptionEncoderExecutionStatus;
#endif
            // We add a retain here because the command buffer is set to autorelease (not alloc or copy)
            fCommandBuffer.reset([[fQueue commandBufferWithDescriptor:desc.get()] retain]);
        } else {
            // We add a retain here because the command buffer is set to autorelease (not alloc or copy)
            fCommandBuffer.reset([[fQueue commandBufferWithUnretainedReferences] retain]);
        }
    }
    return fCommandBuffer != nil;
}
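
// Note that neither creation path above has the command buffer retain the resources encoded
// into it (retainedReferences = NO and commandBufferWithUnretainedReferences, respectively).
// That is safe here because Graphite keeps the GPU objects alive itself (see trackResource()
// usage below), and it skips Metal's per-encode retain/release bookkeeping.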

bool MtlCommandBuffer::commit() {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();
    [(*fCommandBuffer) commit];

    if ((*fCommandBuffer).status == MTLCommandBufferStatusError) {
        NSString* description = (*fCommandBuffer).error.localizedDescription;
        const char* errorString = [description UTF8String];
        SKGPU_LOG_E("Failure submitting command buffer: %s", errorString);
    }

    return ((*fCommandBuffer).status != MTLCommandBufferStatusError);
}

void MtlCommandBuffer::onResetCommandBuffer() {
    fCommandBuffer.reset();
    fActiveRenderCommandEncoder.reset();
    fActiveComputeCommandEncoder.reset();
    fActiveBlitCommandEncoder.reset();
    fCurrentIndexBuffer = nil;
    fCurrentIndexBufferOffset = 0;
}

void MtlCommandBuffer::addWaitSemaphores(size_t numWaitSemaphores,
                                         const BackendSemaphore* waitSemaphores) {
    if (!waitSemaphores) {
        SkASSERT(numWaitSemaphores == 0);
        return;
    }

    // Can only insert events with no active encoder
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();
    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
        for (size_t i = 0; i < numWaitSemaphores; ++i) {
            auto semaphore = waitSemaphores[i];
            if (semaphore.isValid() && semaphore.backend() == BackendApi::kMetal) {
                id<MTLEvent> mtlEvent = (__bridge id<MTLEvent>)semaphore.getMtlEvent();
                [(*fCommandBuffer) encodeWaitForEvent: mtlEvent
                                                value: semaphore.getMtlValue()];
            }
        }
    }
}

void MtlCommandBuffer::addSignalSemaphores(size_t numSignalSemaphores,
                                           const BackendSemaphore* signalSemaphores) {
    if (!signalSemaphores) {
        SkASSERT(numSignalSemaphores == 0);
        return;
    }

    // Can only insert events with no active encoder
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();

    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
        for (size_t i = 0; i < numSignalSemaphores; ++i) {
            auto semaphore = signalSemaphores[i];
            if (semaphore.isValid() && semaphore.backend() == BackendApi::kMetal) {
                id<MTLEvent> mtlEvent = (__bridge id<MTLEvent>)semaphore.getMtlEvent();
                [(*fCommandBuffer) encodeSignalEvent: mtlEvent
                                               value: semaphore.getMtlValue()];
            }
        }
    }
}
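
// Each backend semaphore wraps an MTLEvent/value pair. A wait encoded here blocks this
// command buffer's later work until some command buffer signals the same event with a value
// at least as large, e.g. (sketch, with hypothetical buffers A and B):
//   [bufferA encodeSignalEvent:event value:1];   // A signals once its encoded work completes
//   [bufferB encodeWaitForEvent:event value:1];  // B's subsequent commands wait on that signal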

bool MtlCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc,
                                       const Texture* colorTexture,
                                       const Texture* resolveTexture,
                                       const Texture* depthStencilTexture,
                                       SkRect viewport,
                                       const DrawPassList& drawPasses) {
    if (!this->beginRenderPass(renderPassDesc, colorTexture, resolveTexture, depthStencilTexture)) {
        return false;
    }

    this->setViewport(viewport.x(), viewport.y(), viewport.width(), viewport.height(), 0, 1);

    for (const auto& drawPass : drawPasses) {
        this->addDrawPass(drawPass.get());
    }

    this->endRenderPass();
    return true;
}

bool MtlCommandBuffer::onAddComputePass(DispatchGroupSpan groups) {
    this->beginComputePass();
    for (const auto& group : groups) {
        group->addResourceRefs(this);
        for (const auto& dispatch : group->dispatches()) {
            this->bindComputePipeline(group->getPipeline(dispatch.fPipelineIndex));
            for (const ResourceBinding& binding : dispatch.fBindings) {
                if (const BufferView* buffer = std::get_if<BufferView>(&binding.fResource)) {
                    this->bindBuffer(buffer->fInfo.fBuffer, buffer->fInfo.fOffset, binding.fIndex);
                } else if (const TextureIndex* texIdx =
                                   std::get_if<TextureIndex>(&binding.fResource)) {
                    SkASSERT(texIdx);
                    this->bindTexture(group->getTexture(texIdx->fValue), binding.fIndex);
                } else {
                    const SamplerIndex* samplerIdx = std::get_if<SamplerIndex>(&binding.fResource);
                    SkASSERT(samplerIdx);
                    this->bindSampler(group->getSampler(samplerIdx->fValue), binding.fIndex);
                }
            }
            SkASSERT(fActiveComputeCommandEncoder);
            for (const ComputeStep::WorkgroupBufferDesc& wgBuf : dispatch.fWorkgroupBuffers) {
                fActiveComputeCommandEncoder->setThreadgroupMemoryLength(
                        SkAlignTo(wgBuf.size, 16),
                        wgBuf.index);
            }
            if (const WorkgroupSize* globalSize =
                        std::get_if<WorkgroupSize>(&dispatch.fGlobalSizeOrIndirect)) {
                this->dispatchThreadgroups(*globalSize, dispatch.fLocalSize);
            } else {
                SkASSERT(std::holds_alternative<BufferView>(dispatch.fGlobalSizeOrIndirect));
                const BufferView& indirect =
                        *std::get_if<BufferView>(&dispatch.fGlobalSizeOrIndirect);
                this->dispatchThreadgroupsIndirect(
                        dispatch.fLocalSize, indirect.fInfo.fBuffer, indirect.fInfo.fOffset);
            }
        }
    }
    this->endComputePass();
    return true;
}

bool MtlCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc,
                                       const Texture* colorTexture,
                                       const Texture* resolveTexture,
                                       const Texture* depthStencilTexture) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();

    const static MTLLoadAction mtlLoadAction[] {
        MTLLoadActionLoad,
        MTLLoadActionClear,
        MTLLoadActionDontCare
    };
    static_assert((int)LoadOp::kLoad == 0);
    static_assert((int)LoadOp::kClear == 1);
    static_assert((int)LoadOp::kDiscard == 2);
    static_assert(std::size(mtlLoadAction) == kLoadOpCount);

    const static MTLStoreAction mtlStoreAction[] {
        MTLStoreActionStore,
        MTLStoreActionDontCare
    };
    static_assert((int)StoreOp::kStore == 0);
    static_assert((int)StoreOp::kDiscard == 1);
    static_assert(std::size(mtlStoreAction) == kStoreOpCount);

    sk_cfp<MTLRenderPassDescriptor*> descriptor([[MTLRenderPassDescriptor alloc] init]);
    // Set up color attachment.
    auto& colorInfo = renderPassDesc.fColorAttachment;
    bool loadMSAAFromResolve = false;
    if (colorTexture) {
        // TODO: check Texture matches RenderPassDesc
        auto colorAttachment = (*descriptor).colorAttachments[0];
        colorAttachment.texture = ((const MtlTexture*)colorTexture)->mtlTexture();
        const std::array<float, 4>& clearColor = renderPassDesc.fClearColor;
        colorAttachment.clearColor =
                MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
        colorAttachment.loadAction = mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
        colorAttachment.storeAction = mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];
        // Set up resolve attachment
        if (resolveTexture) {
            SkASSERT(renderPassDesc.fColorResolveAttachment.fStoreOp == StoreOp::kStore);
            // TODO: check Texture matches RenderPassDesc
            colorAttachment.resolveTexture = ((const MtlTexture*)resolveTexture)->mtlTexture();
            // Inclusion of a resolve texture implies the client wants to finish the
            // renderpass with a resolve.
            if (@available(macOS 10.12, iOS 10.0, tvOS 10.0, *)) {
                SkASSERT(colorAttachment.storeAction == MTLStoreActionDontCare);
                colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
            } else {
                // We expect at least Metal 2
                // TODO: Add error output
                SkASSERT(false);
            }
            // But it also means we have to load the resolve texture into the MSAA color attachment
            loadMSAAFromResolve = renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;
            // TODO: If the color resolve texture is read-only we can use a private (vs. memoryless)
            // msaa attachment that's coupled to the framebuffer and the StoreAndMultisampleResolve
            // action instead of loading as a draw.
        }
    }

    // Set up stencil/depth attachment
    auto& depthStencilInfo = renderPassDesc.fDepthStencilAttachment;
    if (depthStencilTexture) {
        // TODO: check Texture matches RenderPassDesc
        id<MTLTexture> mtlTexture = ((const MtlTexture*)depthStencilTexture)->mtlTexture();
        if (MtlFormatIsDepth(mtlTexture.pixelFormat)) {
            auto depthAttachment = (*descriptor).depthAttachment;
            depthAttachment.texture = mtlTexture;
            depthAttachment.clearDepth = renderPassDesc.fClearDepth;
            depthAttachment.loadAction =
                    mtlLoadAction[static_cast<int>(depthStencilInfo.fLoadOp)];
            depthAttachment.storeAction =
                    mtlStoreAction[static_cast<int>(depthStencilInfo.fStoreOp)];
        }
        if (MtlFormatIsStencil(mtlTexture.pixelFormat)) {
            auto stencilAttachment = (*descriptor).stencilAttachment;
            stencilAttachment.texture = mtlTexture;
            stencilAttachment.clearStencil = renderPassDesc.fClearStencil;
            stencilAttachment.loadAction =
                    mtlLoadAction[static_cast<int>(depthStencilInfo.fLoadOp)];
            stencilAttachment.storeAction =
                    mtlStoreAction[static_cast<int>(depthStencilInfo.fStoreOp)];
        }
    } else {
        SkASSERT(!depthStencilInfo.fTextureInfo.isValid());
    }

    fActiveRenderCommandEncoder = MtlRenderCommandEncoder::Make(fSharedContext,
                                                                fCommandBuffer.get(),
                                                                descriptor.get());
    this->trackResource(fActiveRenderCommandEncoder);

    if (loadMSAAFromResolve) {
        // Manually load the contents of the resolve texture into the MSAA attachment as a draw,
        // so the actual load op for the MSAA attachment had better have been discard.
        SkASSERT(colorInfo.fLoadOp == LoadOp::kDiscard);
        auto loadPipeline = fResourceProvider->findOrCreateLoadMSAAPipeline(renderPassDesc);
        if (!loadPipeline) {
            SKGPU_LOG_E("Unable to create pipeline to load resolve texture into MSAA attachment");
            return false;
        }
        this->bindGraphicsPipeline(loadPipeline.get());
        // The load msaa pipeline takes no uniforms, no vertex/instance attributes and only uses
        // one texture that does not require a sampler.
        fActiveRenderCommandEncoder->setFragmentTexture(
                ((const MtlTexture*) resolveTexture)->mtlTexture(), 0);
        this->draw(PrimitiveType::kTriangleStrip, 0, 4);
    }
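
    // The load above is a full-screen draw: a 4-vertex triangle strip covering the
    // attachment, whose fragment shader (as assumed here of findOrCreateLoadMSAAPipeline)
    // reads the resolve texture at the fragment's coordinate and writes it to the MSAA target.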

    return true;
}

void MtlCommandBuffer::endRenderPass() {
    SkASSERT(fActiveRenderCommandEncoder);
    fActiveRenderCommandEncoder->endEncoding();
    fActiveRenderCommandEncoder.reset();
    fDrawIsOffscreen = false;
}

void MtlCommandBuffer::addDrawPass(const DrawPass* drawPass) {
    SkIRect replayPassBounds = drawPass->bounds().makeOffset(fReplayTranslation.x(),
                                                             fReplayTranslation.y());
    if (!SkIRect::Intersects(replayPassBounds, SkIRect::MakeSize(fRenderPassSize))) {
        // The entire DrawPass is offscreen given the replay translation so skip adding any
        // commands. When the DrawPass is partially offscreen individual draw commands will be
        // culled while preserving state changing commands.
        return;
    }

    drawPass->addResourceRefs(this);

    for (auto [type, cmdPtr] : drawPass->commands()) {
        // Skip draw commands if they'd be offscreen.
        if (fDrawIsOffscreen) {
            switch (type) {
                case DrawPassCommands::Type::kDraw:
                case DrawPassCommands::Type::kDrawIndexed:
                case DrawPassCommands::Type::kDrawInstanced:
                case DrawPassCommands::Type::kDrawIndexedInstanced:
                    continue;
                default:
                    break;
            }
        }

        switch (type) {
            case DrawPassCommands::Type::kBindGraphicsPipeline: {
                auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr);
                this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex));
                break;
            }
            case DrawPassCommands::Type::kSetBlendConstants: {
                auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr);
                this->setBlendConstants(sbc->fBlendConstants);
                break;
            }
            case DrawPassCommands::Type::kBindUniformBuffer: {
                auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr);
                this->bindUniformBuffer(bub->fInfo, bub->fSlot);
                break;
            }
            case DrawPassCommands::Type::kBindDrawBuffers: {
                auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr);
                this->bindDrawBuffers(
                        bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect);
                break;
            }
            case DrawPassCommands::Type::kBindTexturesAndSamplers: {
                auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr);
                for (int j = 0; j < bts->fNumTexSamplers; ++j) {
                    this->bindTextureAndSampler(drawPass->getTexture(bts->fTextureIndices[j]),
                                                drawPass->getSampler(bts->fSamplerIndices[j]),
                                                j);
                }
                break;
            }
            case DrawPassCommands::Type::kSetScissor: {
                auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr);
                const SkIRect& rect = ss->fScissor;
                this->setScissor(rect.fLeft, rect.fTop, rect.width(), rect.height());
                break;
            }
            case DrawPassCommands::Type::kDraw: {
                auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr);
                this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexed: {
                auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr);
                this->drawIndexed(draw->fType,
                                  draw->fBaseIndex,
                                  draw->fIndexCount,
                                  draw->fBaseVertex);
                break;
            }
            case DrawPassCommands::Type::kDrawInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr);
                this->drawInstanced(draw->fType,
                                    draw->fBaseVertex,
                                    draw->fVertexCount,
                                    draw->fBaseInstance,
                                    draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr);
                this->drawIndexedInstanced(draw->fType,
                                           draw->fBaseIndex,
                                           draw->fIndexCount,
                                           draw->fBaseVertex,
                                           draw->fBaseInstance,
                                           draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr);
                this->drawIndirect(draw->fType);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr);
                this->drawIndexedIndirect(draw->fType);
                break;
            }
        }
    }
}

MtlBlitCommandEncoder* MtlCommandBuffer::getBlitCommandEncoder() {
    if (fActiveBlitCommandEncoder) {
        return fActiveBlitCommandEncoder.get();
    }

    fActiveBlitCommandEncoder = MtlBlitCommandEncoder::Make(fSharedContext, fCommandBuffer.get());

    if (!fActiveBlitCommandEncoder) {
        return nullptr;
    }

    // We add the ref on the command buffer for the BlitCommandEncoder now so that we don't need
    // to add a ref for every copy we do.
    this->trackResource(fActiveBlitCommandEncoder);
    return fActiveBlitCommandEncoder.get();
}
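
// The blit encoder is created lazily and left open across consecutive copy/clear calls so
// that back-to-back blits share a single encoder. endBlitCommandEncoder() closes it whenever
// a render or compute pass begins, a semaphore is encoded, or the buffer is committed.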

void MtlCommandBuffer::endBlitCommandEncoder() {
    if (fActiveBlitCommandEncoder) {
        fActiveBlitCommandEncoder->endEncoding();
        fActiveBlitCommandEncoder.reset();
    }
}

void MtlCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) {
    SkASSERT(fActiveRenderCommandEncoder);

    auto mtlPipeline = static_cast<const MtlGraphicsPipeline*>(graphicsPipeline);
    auto pipelineState = mtlPipeline->mtlPipelineState();
    fActiveRenderCommandEncoder->setRenderPipelineState(pipelineState);
    auto depthStencilState = mtlPipeline->mtlDepthStencilState();
    fActiveRenderCommandEncoder->setDepthStencilState(depthStencilState);
    uint32_t stencilRefValue = mtlPipeline->stencilReferenceValue();
    fActiveRenderCommandEncoder->setStencilReferenceValue(stencilRefValue);
}

void MtlCommandBuffer::bindUniformBuffer(const BindBufferInfo& info, UniformSlot slot) {
    SkASSERT(fActiveRenderCommandEncoder);

    id<MTLBuffer> mtlBuffer = info.fBuffer ?
            static_cast<const MtlBuffer*>(info.fBuffer)->mtlBuffer() : nullptr;

    unsigned int bufferIndex;
    switch(slot) {
        case UniformSlot::kRenderStep:
            bufferIndex = MtlGraphicsPipeline::kRenderStepUniformBufferIndex;
            break;
        case UniformSlot::kPaint:
            bufferIndex = MtlGraphicsPipeline::kPaintUniformBufferIndex;
            break;
        case UniformSlot::kGradient:
            bufferIndex = MtlGraphicsPipeline::kGradientBufferIndex;
            break;
    }

    fActiveRenderCommandEncoder->setVertexBuffer(mtlBuffer, info.fOffset, bufferIndex);
    fActiveRenderCommandEncoder->setFragmentBuffer(mtlBuffer, info.fOffset, bufferIndex);
}
503
504void MtlCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices,
505 const BindBufferInfo& instances,
506 const BindBufferInfo& indices,
507 const BindBufferInfo& indirect) {
508 this->bindVertexBuffers(vertices.fBuffer,
509 vertices.fOffset,
510 instances.fBuffer,
511 instances.fOffset);
512 this->bindIndexBuffer(indices.fBuffer, indices.fOffset);
513 this->bindIndirectBuffer(indirect.fBuffer, indirect.fOffset);
514}
515
void MtlCommandBuffer::bindVertexBuffers(const Buffer* vertexBuffer,
                                         size_t vertexOffset,
                                         const Buffer* instanceBuffer,
                                         size_t instanceOffset) {
    SkASSERT(fActiveRenderCommandEncoder);

    if (vertexBuffer) {
        id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(vertexBuffer)->mtlBuffer();
        // Metal requires buffer offsets to be aligned to the data type, which is at most 4 bytes
        // since we use [[attribute]] to automatically unpack float components into SIMD arrays.
        SkASSERT((vertexOffset & 0b11) == 0);
        fActiveRenderCommandEncoder->setVertexBuffer(mtlBuffer, vertexOffset,
                                                     MtlGraphicsPipeline::kVertexBufferIndex);
    }
    if (instanceBuffer) {
        id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(instanceBuffer)->mtlBuffer();
        SkASSERT((instanceOffset & 0b11) == 0);
        fActiveRenderCommandEncoder->setVertexBuffer(mtlBuffer, instanceOffset,
                                                     MtlGraphicsPipeline::kInstanceBufferIndex);
    }
}

void MtlCommandBuffer::bindIndexBuffer(const Buffer* indexBuffer, size_t offset) {
    if (indexBuffer) {
        fCurrentIndexBuffer = static_cast<const MtlBuffer*>(indexBuffer)->mtlBuffer();
        fCurrentIndexBufferOffset = offset;
    } else {
        fCurrentIndexBuffer = nil;
        fCurrentIndexBufferOffset = 0;
    }
}

void MtlCommandBuffer::bindIndirectBuffer(const Buffer* indirectBuffer, size_t offset) {
    if (indirectBuffer) {
        fCurrentIndirectBuffer = static_cast<const MtlBuffer*>(indirectBuffer)->mtlBuffer();
        fCurrentIndirectBufferOffset = offset;
    } else {
        fCurrentIndirectBuffer = nil;
        fCurrentIndirectBufferOffset = 0;
    }
}

void MtlCommandBuffer::bindTextureAndSampler(const Texture* texture,
                                             const Sampler* sampler,
                                             unsigned int bindIndex) {
    SkASSERT(texture && sampler);
    SkASSERT(fActiveRenderCommandEncoder);

    id<MTLTexture> mtlTexture = ((const MtlTexture*)texture)->mtlTexture();
    id<MTLSamplerState> mtlSamplerState = ((const MtlSampler*)sampler)->mtlSamplerState();
    fActiveRenderCommandEncoder->setFragmentTexture(mtlTexture, bindIndex);
    fActiveRenderCommandEncoder->setFragmentSamplerState(mtlSamplerState, bindIndex);
}

void MtlCommandBuffer::setScissor(unsigned int left, unsigned int top,
                                  unsigned int width, unsigned int height) {
    SkASSERT(fActiveRenderCommandEncoder);
    SkIRect scissor = SkIRect::MakeXYWH(
            left + fReplayTranslation.x(), top + fReplayTranslation.y(), width, height);
    fDrawIsOffscreen = !scissor.intersect(SkIRect::MakeSize(fRenderPassSize));
    if (fDrawIsOffscreen) {
        scissor.setEmpty();
    }

    fActiveRenderCommandEncoder->setScissorRect({
            static_cast<unsigned int>(scissor.x()),
            static_cast<unsigned int>(scissor.y()),
            static_cast<unsigned int>(scissor.width()),
            static_cast<unsigned int>(scissor.height()),
    });
}

void MtlCommandBuffer::setViewport(float x, float y, float width, float height,
                                   float minDepth, float maxDepth) {
    SkASSERT(fActiveRenderCommandEncoder);
    MTLViewport viewport = {x + fReplayTranslation.x(),
                            y + fReplayTranslation.y(),
                            width,
                            height,
                            minDepth,
                            maxDepth};
    fActiveRenderCommandEncoder->setViewport(viewport);

    float invTwoW = 2.f / width;
    float invTwoH = 2.f / height;
    // Metal's framebuffer space has (0, 0) at the top left. This agrees with Skia's device coords.
    // However, in NDC (-1, -1) is the bottom left. So we flip the origin here (assuming all
    // surfaces we have are TopLeft origin).
    float rtAdjust[4] = {invTwoW, -invTwoH, -1.f - x * invTwoW, 1.f + y * invTwoH};
    fActiveRenderCommandEncoder->setVertexBytes(rtAdjust, 4 * sizeof(float),
                                                MtlGraphicsPipeline::kIntrinsicUniformBufferIndex);
}
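
// To see how rtAdjust maps a device-space position d to Metal NDC, assume the vertex shader
// applies the usual scale-plus-offset form, ndc = d * rtAdjust.xz + rtAdjust.zw componentwise:
//   ndc.x = d.x * (2/w) + (-1 - x*(2/w)) =  (d.x - x) * 2/w - 1    // left edge -> -1, right -> +1
//   ndc.y = d.y * (-2/h) + (1 + y*(2/h)) = -((d.y - y) * 2/h - 1)  // top edge -> +1, bottom -> -1
// which is exactly the Y-flip described in the comment above.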

void MtlCommandBuffer::setBlendConstants(float* blendConstants) {
    SkASSERT(fActiveRenderCommandEncoder);

    fActiveRenderCommandEncoder->setBlendColor(blendConstants);
}

static MTLPrimitiveType graphite_to_mtl_primitive(PrimitiveType primitiveType) {
    const static MTLPrimitiveType mtlPrimitiveType[] {
        MTLPrimitiveTypeTriangle,
        MTLPrimitiveTypeTriangleStrip,
        MTLPrimitiveTypePoint,
    };
    static_assert((int)PrimitiveType::kTriangles == 0);
    static_assert((int)PrimitiveType::kTriangleStrip == 1);
    static_assert((int)PrimitiveType::kPoints == 2);

    SkASSERT(primitiveType <= PrimitiveType::kPoints);
    return mtlPrimitiveType[static_cast<int>(primitiveType)];
}

void MtlCommandBuffer::draw(PrimitiveType type,
                            unsigned int baseVertex,
                            unsigned int vertexCount) {
    SkASSERT(fActiveRenderCommandEncoder);

    auto mtlPrimitiveType = graphite_to_mtl_primitive(type);

    fActiveRenderCommandEncoder->drawPrimitives(mtlPrimitiveType, baseVertex, vertexCount);
}

void MtlCommandBuffer::drawIndexed(PrimitiveType type, unsigned int baseIndex,
                                   unsigned int indexCount, unsigned int baseVertex) {
    SkASSERT(fActiveRenderCommandEncoder);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        size_t indexOffset = fCurrentIndexBufferOffset + sizeof(uint16_t) * baseIndex;
        // Use the "instance" variant with a count of 1 so that we can pass in a base vertex
        // instead of rebinding a vertex buffer offset.
        fActiveRenderCommandEncoder->drawIndexedPrimitives(mtlPrimitiveType, indexCount,
                                                           MTLIndexTypeUInt16, fCurrentIndexBuffer,
                                                           indexOffset, 1, baseVertex, 0);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::drawInstanced(PrimitiveType type, unsigned int baseVertex,
                                     unsigned int vertexCount, unsigned int baseInstance,
                                     unsigned int instanceCount) {
    SkASSERT(fActiveRenderCommandEncoder);

    auto mtlPrimitiveType = graphite_to_mtl_primitive(type);

    // This ordering is correct
    fActiveRenderCommandEncoder->drawPrimitives(mtlPrimitiveType, baseVertex, vertexCount,
                                                instanceCount, baseInstance);
}

void MtlCommandBuffer::drawIndexedInstanced(PrimitiveType type,
                                            unsigned int baseIndex,
                                            unsigned int indexCount,
                                            unsigned int baseVertex,
                                            unsigned int baseInstance,
                                            unsigned int instanceCount) {
    SkASSERT(fActiveRenderCommandEncoder);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        size_t indexOffset = fCurrentIndexBufferOffset + sizeof(uint16_t) * baseIndex;
        fActiveRenderCommandEncoder->drawIndexedPrimitives(mtlPrimitiveType, indexCount,
                                                           MTLIndexTypeUInt16, fCurrentIndexBuffer,
                                                           indexOffset, instanceCount,
                                                           baseVertex, baseInstance);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::drawIndirect(PrimitiveType type) {
    SkASSERT(fActiveRenderCommandEncoder);
    SkASSERT(fCurrentIndirectBuffer);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        fActiveRenderCommandEncoder->drawPrimitives(
                mtlPrimitiveType, fCurrentIndirectBuffer, fCurrentIndirectBufferOffset);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::drawIndexedIndirect(PrimitiveType type) {
    SkASSERT(fActiveRenderCommandEncoder);
    SkASSERT(fCurrentIndirectBuffer);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        fActiveRenderCommandEncoder->drawIndexedPrimitives(mtlPrimitiveType,
                                                           MTLIndexTypeUInt32,
                                                           fCurrentIndexBuffer,
                                                           fCurrentIndexBufferOffset,
                                                           fCurrentIndirectBuffer,
                                                           fCurrentIndirectBufferOffset);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}
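
// For the indirect draws above, Metal expects the bound indirect buffer to hold the standard
// 32-bit argument structs at the given offset: MTLDrawPrimitivesIndirectArguments
// {vertexCount, instanceCount, vertexStart, baseInstance} for drawIndirect, and
// MTLDrawIndexedPrimitivesIndirectArguments {indexCount, instanceCount, indexStart,
// baseVertex, baseInstance} for drawIndexedIndirect.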

void MtlCommandBuffer::beginComputePass() {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();
    fActiveComputeCommandEncoder = MtlComputeCommandEncoder::Make(fSharedContext,
                                                                  fCommandBuffer.get());
}

void MtlCommandBuffer::bindComputePipeline(const ComputePipeline* computePipeline) {
    SkASSERT(fActiveComputeCommandEncoder);

    auto mtlPipeline = static_cast<const MtlComputePipeline*>(computePipeline);
    fActiveComputeCommandEncoder->setComputePipelineState(mtlPipeline->mtlPipelineState());
}

void MtlCommandBuffer::bindBuffer(const Buffer* buffer, unsigned int offset, unsigned int index) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlBuffer = buffer ? static_cast<const MtlBuffer*>(buffer)->mtlBuffer() : nil;
    fActiveComputeCommandEncoder->setBuffer(mtlBuffer, offset, index);
}

void MtlCommandBuffer::bindTexture(const Texture* texture, unsigned int index) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLTexture> mtlTexture =
            texture ? static_cast<const MtlTexture*>(texture)->mtlTexture() : nil;
    fActiveComputeCommandEncoder->setTexture(mtlTexture, index);
}

void MtlCommandBuffer::bindSampler(const Sampler* sampler, unsigned int index) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLSamplerState> mtlSamplerState =
            sampler ? static_cast<const MtlSampler*>(sampler)->mtlSamplerState() : nil;
    fActiveComputeCommandEncoder->setSamplerState(mtlSamplerState, index);
}

void MtlCommandBuffer::dispatchThreadgroups(const WorkgroupSize& globalSize,
                                            const WorkgroupSize& localSize) {
    SkASSERT(fActiveComputeCommandEncoder);
    fActiveComputeCommandEncoder->dispatchThreadgroups(globalSize, localSize);
}

void MtlCommandBuffer::dispatchThreadgroupsIndirect(const WorkgroupSize& localSize,
                                                    const Buffer* indirectBuffer,
                                                    size_t indirectBufferOffset) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlIndirectBuffer = static_cast<const MtlBuffer*>(indirectBuffer)->mtlBuffer();
    fActiveComputeCommandEncoder->dispatchThreadgroupsWithIndirectBuffer(
            mtlIndirectBuffer, indirectBufferOffset, localSize);
}

void MtlCommandBuffer::endComputePass() {
    SkASSERT(fActiveComputeCommandEncoder);
    fActiveComputeCommandEncoder->endEncoding();
    fActiveComputeCommandEncoder.reset();
}

static bool check_max_blit_width(int widthInPixels) {
    if (widthInPixels > 32767) {
        SkASSERT(false); // surfaces should not be this wide anyway
        return false;
    }
    return true;
}

bool MtlCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer,
                                            size_t srcOffset,
                                            const Buffer* dstBuffer,
                                            size_t dstOffset,
                                            size_t size) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlSrcBuffer = static_cast<const MtlBuffer*>(srcBuffer)->mtlBuffer();
    id<MTLBuffer> mtlDstBuffer = static_cast<const MtlBuffer*>(dstBuffer)->mtlBuffer();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyBufferToBuffer");
#endif
    blitCmdEncoder->copyBufferToBuffer(mtlSrcBuffer, srcOffset, mtlDstBuffer, dstOffset, size);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onCopyTextureToBuffer(const Texture* texture,
                                             SkIRect srcRect,
                                             const Buffer* buffer,
                                             size_t bufferOffset,
                                             size_t bufferRowBytes) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    if (!check_max_blit_width(srcRect.width())) {
        return false;
    }

    id<MTLTexture> mtlTexture = static_cast<const MtlTexture*>(texture)->mtlTexture();
    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyTextureToBuffer");
#endif
    blitCmdEncoder->copyFromTexture(mtlTexture, srcRect, mtlBuffer, bufferOffset, bufferRowBytes);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
                                             const Texture* texture,
                                             const BufferTextureCopyData* copyData,
                                             int count) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
    id<MTLTexture> mtlTexture = static_cast<const MtlTexture*>(texture)->mtlTexture();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyBufferToTexture");
#endif
    for (int i = 0; i < count; ++i) {
        if (!check_max_blit_width(copyData[i].fRect.width())) {
            return false;
        }

        blitCmdEncoder->copyFromBuffer(mtlBuffer,
                                       copyData[i].fBufferOffset,
                                       copyData[i].fBufferRowBytes,
                                       mtlTexture,
                                       copyData[i].fRect,
                                       copyData[i].fMipLevel);
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onCopyTextureToTexture(const Texture* src,
                                              SkIRect srcRect,
                                              const Texture* dst,
                                              SkIPoint dstPoint,
                                              int mipLevel) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLTexture> srcMtlTexture = static_cast<const MtlTexture*>(src)->mtlTexture();
    id<MTLTexture> dstMtlTexture = static_cast<const MtlTexture*>(dst)->mtlTexture();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyTextureToTexture");
#endif

    blitCmdEncoder->copyTextureToTexture(srcMtlTexture, srcRect, dstMtlTexture, dstPoint, mipLevel);

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) {
#ifdef SK_BUILD_FOR_MAC
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
    if ([mtlBuffer storageMode] != MTLStorageModeManaged) {
        *outDidResultInWork = false;
        return true;
    }

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"synchronizeToCpu");
#endif
    blitCmdEncoder->synchronizeResource(mtlBuffer);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif

    *outDidResultInWork = true;
    return true;
#else  // SK_BUILD_FOR_MAC
    // Explicit synchronization is never necessary on builds that are not macOS since we never use
    // discrete GPUs with managed mode buffers outside of macOS.
    *outDidResultInWork = false;
    return true;
#endif  // SK_BUILD_FOR_MAC
}
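
// A managed-mode buffer keeps separate CPU- and GPU-side allocations; synchronizeResource
// encodes a GPU-to-CPU copy so that, once this command buffer completes, mapping the buffer's
// contents observes the GPU's writes. Shared-mode buffers (the only mode on iOS/tvOS, and the
// usual mode on Apple-silicon Macs with unified memory) have one allocation and need no blit.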

bool MtlCommandBuffer::onClearBuffer(const Buffer* buffer, size_t offset, size_t size) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
    blitCmdEncoder->fillBuffer(mtlBuffer, offset, size, 0);

    return true;
}

} // namespace skgpu::graphite