MtlCommandBuffer.mm
/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

namespace skgpu::graphite {

std::unique_ptr<MtlCommandBuffer> MtlCommandBuffer::Make(id<MTLCommandQueue> queue,
                                                         const MtlSharedContext* sharedContext,
                                                         MtlResourceProvider* resourceProvider) {
    auto commandBuffer = std::unique_ptr<MtlCommandBuffer>(
            new MtlCommandBuffer(queue, sharedContext, resourceProvider));
    if (!commandBuffer) {
        return nullptr;
    }
    if (!commandBuffer->createNewMTLCommandBuffer()) {
        return nullptr;
    }
    return commandBuffer;
}

MtlCommandBuffer::MtlCommandBuffer(id<MTLCommandQueue> queue,
                                   const MtlSharedContext* sharedContext,
                                   MtlResourceProvider* resourceProvider)
        : fQueue(queue)
        , fSharedContext(sharedContext)
        , fResourceProvider(resourceProvider) {}

MtlCommandBuffer::~MtlCommandBuffer() {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    SkASSERT(!fActiveBlitCommandEncoder);
}

bool MtlCommandBuffer::setNewCommandBufferResources() {
    return this->createNewMTLCommandBuffer();
}

bool MtlCommandBuffer::createNewMTLCommandBuffer() {
    SkASSERT(fCommandBuffer == nil);

    // Inserting a pool here so the autorelease occurs when we return and the
    // only remaining ref is the retain below.
    @autoreleasepool {
        if (@available(macOS 11.0, iOS 14.0, tvOS 14.0, *)) {
            sk_cfp<MTLCommandBufferDescriptor*> desc([[MTLCommandBufferDescriptor alloc] init]);
            (*desc).retainedReferences = NO;
#ifdef SK_ENABLE_MTL_DEBUG_INFO
            (*desc).errorOptions = MTLCommandBufferErrorOptionEncoderExecutionStatus;
#endif
            // We add a retain here because the command buffer is set to autorelease
            // (not alloc or copy).
            fCommandBuffer.reset([[fQueue commandBufferWithDescriptor:desc.get()] retain]);
        } else {
            // We add a retain here because the command buffer is set to autorelease
            // (not alloc or copy).
            fCommandBuffer.reset([[fQueue commandBufferWithUnretainedReferences] retain]);
        }
    }
    return fCommandBuffer != nil;
}
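
// Illustrative note (not in the original source): both MTLCommandQueue factory
// methods used above return autoreleased objects, so the explicit -retain is what
// keeps the command buffer alive once the enclosing @autoreleasepool drains. The
// general shape of the pattern, with a hypothetical factory method, is:
//
//   @autoreleasepool {
//       sk_cfp<NSObject*> obj([[queue someAutoreleasedFactory] retain]);
//   }  // pool drains here; the retain held by `obj` is the sole remaining ref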

bool MtlCommandBuffer::commit() {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();
    [(*fCommandBuffer) commit];

    if ((*fCommandBuffer).status == MTLCommandBufferStatusError) {
        NSString* description = (*fCommandBuffer).error.localizedDescription;
        const char* errorString = [description UTF8String];
        SKGPU_LOG_E("Failure submitting command buffer: %s", errorString);
    }

    return ((*fCommandBuffer).status != MTLCommandBufferStatusError);
}
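
// Illustrative note: -commit only enqueues the command buffer for execution, so
// the status check above can only catch errors already known at submission time.
// Errors raised during GPU execution surface later, e.g. (sketch, assuming direct
// access to the underlying MTLCommandBuffer):
//
//   [mtlCmdBuffer addCompletedHandler:^(id<MTLCommandBuffer> cb) {
//       if (cb.status == MTLCommandBufferStatusError) { /* inspect cb.error */ }
//   }];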

void MtlCommandBuffer::onResetCommandBuffer() {
    fCommandBuffer.reset();
    fActiveRenderCommandEncoder.reset();
    fActiveComputeCommandEncoder.reset();
    fActiveBlitCommandEncoder.reset();
    fCurrentIndexBuffer = nil;
    fCurrentIndexBufferOffset = 0;
}

void MtlCommandBuffer::addWaitSemaphores(size_t numWaitSemaphores,
                                         const BackendSemaphore* waitSemaphores) {
    if (!waitSemaphores) {
        SkASSERT(numWaitSemaphores == 0);
        return;
    }

    // Can only insert events with no active encoder.
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();
    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
        for (size_t i = 0; i < numWaitSemaphores; ++i) {
            auto semaphore = waitSemaphores[i];
            if (semaphore.isValid() && semaphore.backend() == BackendApi::kMetal) {
                id<MTLEvent> mtlEvent = (__bridge id<MTLEvent>)semaphore.getMtlEvent();
                [(*fCommandBuffer) encodeWaitForEvent: mtlEvent
                                                value: semaphore.getMtlValue()];
            }
        }
    }
}

void MtlCommandBuffer::addSignalSemaphores(size_t numSignalSemaphores,
                                           const BackendSemaphore* signalSemaphores) {
    if (!signalSemaphores) {
        SkASSERT(numSignalSemaphores == 0);
        return;
    }

    // Can only insert events with no active encoder.
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();

    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
        for (size_t i = 0; i < numSignalSemaphores; ++i) {
            auto semaphore = signalSemaphores[i];
            if (semaphore.isValid() && semaphore.backend() == BackendApi::kMetal) {
                id<MTLEvent> mtlEvent = (__bridge id<MTLEvent>)semaphore.getMtlEvent();
                [(*fCommandBuffer) encodeSignalEvent: mtlEvent
                                               value: semaphore.getMtlValue()];
            }
        }
    }
}
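
// Sketch of how the wait/signal pair above is meant to be used (hypothetical
// caller; `eventSemaphore` stands in for a BackendSemaphore wrapping an MTLEvent,
// e.g. one created via [device newSharedEvent]):
//
//   producer->addSignalSemaphores(1, &eventSemaphore);  // signals when producer completes
//   consumer->addWaitSemaphores(1, &eventSemaphore);    // gates consumer's encoders until then
//
// Because Metal only allows encodeSignalEvent/encodeWaitForEvent between encoders,
// both methods end any active blit encoder and assert no render/compute encoder is open.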

bool MtlCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc,
                                       const Texture* colorTexture,
                                       const Texture* resolveTexture,
                                       const Texture* depthStencilTexture,
                                       SkRect viewport,
                                       const DrawPassList& drawPasses) {
    if (!this->beginRenderPass(renderPassDesc, colorTexture, resolveTexture, depthStencilTexture)) {
        return false;
    }

    this->setViewport(viewport.x(), viewport.y(), viewport.width(), viewport.height(), 0, 1);

    for (const auto& drawPass : drawPasses) {
        this->addDrawPass(drawPass.get());
    }

    this->endRenderPass();
    return true;
}

bool MtlCommandBuffer::onAddComputePass(DispatchGroupSpan groups) {
    this->beginComputePass();
    for (const auto& group : groups) {
        group->addResourceRefs(this);
        for (const auto& dispatch : group->dispatches()) {
            this->bindComputePipeline(group->getPipeline(dispatch.fPipelineIndex));
            for (const ResourceBinding& binding : dispatch.fBindings) {
                if (const BufferView* buffer = std::get_if<BufferView>(&binding.fResource)) {
                    this->bindBuffer(buffer->fInfo.fBuffer, buffer->fInfo.fOffset, binding.fIndex);
                } else if (const TextureIndex* texIdx =
                                   std::get_if<TextureIndex>(&binding.fResource)) {
                    SkASSERT(texIdx);
                    this->bindTexture(group->getTexture(texIdx->fValue), binding.fIndex);
                } else {
                    const SamplerIndex* samplerIdx = std::get_if<SamplerIndex>(&binding.fResource);
                    SkASSERT(samplerIdx);
                    this->bindSampler(group->getSampler(samplerIdx->fValue), binding.fIndex);
                }
            }
            SkASSERT(fActiveComputeCommandEncoder);
            for (const ComputeStep::WorkgroupBufferDesc& wgBuf : dispatch.fWorkgroupBuffers) {
                fActiveComputeCommandEncoder->setThreadgroupMemoryLength(
                        SkAlignTo(wgBuf.size, 16),
                        wgBuf.index);
            }
            if (const WorkgroupSize* globalSize =
                        std::get_if<WorkgroupSize>(&dispatch.fGlobalSizeOrIndirect)) {
                this->dispatchThreadgroups(*globalSize, dispatch.fLocalSize);
            } else {
                SkASSERT(std::holds_alternative<BufferView>(dispatch.fGlobalSizeOrIndirect));
                const BufferView& indirect =
                        *std::get_if<BufferView>(&dispatch.fGlobalSizeOrIndirect);
                this->dispatchThreadgroupsIndirect(
                        dispatch.fLocalSize, indirect.fInfo.fBuffer, indirect.fInfo.fOffset);
            }
        }
    }
    this->endComputePass();
    return true;
}
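
// Note on SkAlignTo(wgBuf.size, 16) above: Metal requires the length passed to
// setThreadgroupMemoryLength:atIndex: to be a multiple of 16 bytes, so each
// requested workgroup buffer size is rounded up before reaching the encoder,
// e.g. SkAlignTo(20, 16) == 32.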

bool MtlCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc,
                                       const Texture* colorTexture,
                                       const Texture* resolveTexture,
                                       const Texture* depthStencilTexture) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();

    const static MTLLoadAction mtlLoadAction[] {
        MTLLoadActionLoad,
        MTLLoadActionClear,
        MTLLoadActionDontCare
    };
    static_assert((int)LoadOp::kLoad == 0);
    static_assert((int)LoadOp::kClear == 1);
    static_assert((int)LoadOp::kDiscard == 2);
    static_assert(std::size(mtlLoadAction) == kLoadOpCount);

    const static MTLStoreAction mtlStoreAction[] {
        MTLStoreActionStore,
        MTLStoreActionDontCare
    };
    static_assert((int)StoreOp::kStore == 0);
    static_assert((int)StoreOp::kDiscard == 1);
    static_assert(std::size(mtlStoreAction) == kStoreOpCount);

    sk_cfp<MTLRenderPassDescriptor*> descriptor([[MTLRenderPassDescriptor alloc] init]);
    // Set up color attachment.
    auto& colorInfo = renderPassDesc.fColorAttachment;
    bool loadMSAAFromResolve = false;
    if (colorTexture) {
        // TODO: check Texture matches RenderPassDesc
        auto colorAttachment = (*descriptor).colorAttachments[0];
        colorAttachment.texture = ((const MtlTexture*)colorTexture)->mtlTexture();
        const std::array<float, 4>& clearColor = renderPassDesc.fClearColor;
        colorAttachment.clearColor =
                MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
        colorAttachment.loadAction = mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
        colorAttachment.storeAction = mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];
        // Set up resolve attachment.
        if (resolveTexture) {
            // TODO: check Texture matches RenderPassDesc
            colorAttachment.resolveTexture = ((const MtlTexture*)resolveTexture)->mtlTexture();
            // Inclusion of a resolve texture implies the client wants to finish the
            // render pass with a resolve.
            if (@available(macOS 10.12, iOS 10.0, tvOS 10.0, *)) {
                SkASSERT(colorAttachment.storeAction == MTLStoreActionDontCare);
                colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
            } else {
                // We expect at least Metal 2.
                // TODO: Add error output
                SkASSERT(false);
            }
            // But it also means we have to load the resolve texture into the MSAA
            // color attachment.
            loadMSAAFromResolve = renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;
            // TODO: If the color resolve texture is read-only we can use a private
            // (vs. memoryless) msaa attachment that's coupled to the framebuffer and
            // the StoreAndMultisampleResolve action instead of loading as a draw.
        }
    }

    // Set up stencil/depth attachment.
    auto& depthStencilInfo = renderPassDesc.fDepthStencilAttachment;
    if (depthStencilTexture) {
        // TODO: check Texture matches RenderPassDesc
        id<MTLTexture> mtlTexture = ((const MtlTexture*)depthStencilTexture)->mtlTexture();
        if (MtlFormatIsDepth(mtlTexture.pixelFormat)) {
            auto depthAttachment = (*descriptor).depthAttachment;
            depthAttachment.texture = mtlTexture;
            depthAttachment.clearDepth = renderPassDesc.fClearDepth;
            depthAttachment.loadAction =
                    mtlLoadAction[static_cast<int>(depthStencilInfo.fLoadOp)];
            depthAttachment.storeAction =
                    mtlStoreAction[static_cast<int>(depthStencilInfo.fStoreOp)];
        }
        if (MtlFormatIsStencil(mtlTexture.pixelFormat)) {
            auto stencilAttachment = (*descriptor).stencilAttachment;
            stencilAttachment.texture = mtlTexture;
            stencilAttachment.clearStencil = renderPassDesc.fClearStencil;
            stencilAttachment.loadAction =
                    mtlLoadAction[static_cast<int>(depthStencilInfo.fLoadOp)];
            stencilAttachment.storeAction =
                    mtlStoreAction[static_cast<int>(depthStencilInfo.fStoreOp)];
        }
    } else {
        SkASSERT(!depthStencilInfo.fTextureInfo.isValid());
    }

    fActiveRenderCommandEncoder = MtlRenderCommandEncoder::Make(fSharedContext,
                                                                fCommandBuffer.get(),
                                                                descriptor.get());
    this->trackResource(fActiveRenderCommandEncoder);

    if (loadMSAAFromResolve) {
        // Manually load the contents of the resolve texture into the MSAA attachment as a
        // draw, so the actual load op for the MSAA attachment had better have been discard.
        SkASSERT(colorInfo.fLoadOp == LoadOp::kDiscard);
        auto loadPipeline = fResourceProvider->findOrCreateLoadMSAAPipeline(renderPassDesc);
        if (!loadPipeline) {
            SKGPU_LOG_E("Unable to create pipeline to load resolve texture into MSAA attachment");
            return false;
        }
        this->bindGraphicsPipeline(loadPipeline.get());
        // The load msaa pipeline takes no uniforms, no vertex/instance attributes and only
        // uses one texture that does not require a sampler.
        fActiveRenderCommandEncoder->setFragmentTexture(
                ((const MtlTexture*) resolveTexture)->mtlTexture(), 0);
        this->draw(PrimitiveType::kTriangleStrip, 0, 4);
    }

    return true;
}

void MtlCommandBuffer::endRenderPass() {
    SkASSERT(fActiveRenderCommandEncoder);
    fActiveRenderCommandEncoder->endEncoding();
    fActiveRenderCommandEncoder.reset();
    fDrawIsOffscreen = false;
}

void MtlCommandBuffer::addDrawPass(const DrawPass* drawPass) {
    SkIRect replayPassBounds = drawPass->bounds().makeOffset(fReplayTranslation.x(),
                                                             fReplayTranslation.y());
    if (!SkIRect::Intersects(replayPassBounds, SkIRect::MakeSize(fRenderPassSize))) {
        // The entire DrawPass is offscreen given the replay translation so skip adding any
        // commands. When the DrawPass is partially offscreen individual draw commands will
        // be culled while preserving state changing commands.
        return;
    }

    drawPass->addResourceRefs(this);

    for (auto [type, cmdPtr] : drawPass->commands()) {
        // Skip draw commands if they'd be offscreen.
        if (fDrawIsOffscreen) {
            switch (type) {
                case DrawPassCommands::Type::kDraw:
                case DrawPassCommands::Type::kDrawIndexed:
                case DrawPassCommands::Type::kDrawInstanced:
                case DrawPassCommands::Type::kDrawIndexedInstanced:
                    continue;
                default:
                    break;
            }
        }

        switch (type) {
            case DrawPassCommands::Type::kBindGraphicsPipeline: {
                auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr);
                this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex));
                break;
            }
            case DrawPassCommands::Type::kSetBlendConstants: {
                auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr);
                this->setBlendConstants(sbc->fBlendConstants);
                break;
            }
            case DrawPassCommands::Type::kBindUniformBuffer: {
                auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr);
                this->bindUniformBuffer(bub->fInfo, bub->fSlot);
                break;
            }
            case DrawPassCommands::Type::kBindDrawBuffers: {
                auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr);
                this->bindDrawBuffers(
                        bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect);
                break;
            }
            case DrawPassCommands::Type::kBindTexturesAndSamplers: {
                auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr);
                for (int j = 0; j < bts->fNumTexSamplers; ++j) {
                    this->bindTextureAndSampler(drawPass->getTexture(bts->fTextureIndices[j]),
                                                drawPass->getSampler(bts->fSamplerIndices[j]),
                                                j);
                }
                break;
            }
            case DrawPassCommands::Type::kSetScissor: {
                auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr);
                const SkIRect& rect = ss->fScissor;
                this->setScissor(rect.fLeft, rect.fTop, rect.width(), rect.height());
                break;
            }
            case DrawPassCommands::Type::kDraw: {
                auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr);
                this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexed: {
                auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr);
                this->drawIndexed(draw->fType,
                                  draw->fBaseIndex,
                                  draw->fIndexCount,
                                  draw->fBaseVertex);
                break;
            }
            case DrawPassCommands::Type::kDrawInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr);
                this->drawInstanced(draw->fType,
                                    draw->fBaseVertex,
                                    draw->fVertexCount,
                                    draw->fBaseInstance,
                                    draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr);
                this->drawIndexedInstanced(draw->fType,
                                           draw->fBaseIndex,
                                           draw->fIndexCount,
                                           draw->fBaseVertex,
                                           draw->fBaseInstance,
                                           draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr);
                this->drawIndirect(draw->fType);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr);
                this->drawIndexedIndirect(draw->fType);
                break;
            }
        }
    }
}

MtlBlitCommandEncoder* MtlCommandBuffer::getBlitCommandEncoder() {
    if (fActiveBlitCommandEncoder) {
        return fActiveBlitCommandEncoder.get();
    }

    fActiveBlitCommandEncoder = MtlBlitCommandEncoder::Make(fSharedContext, fCommandBuffer.get());

    if (!fActiveBlitCommandEncoder) {
        return nullptr;
    }

    // We add the ref on the command buffer for the BlitCommandEncoder now so that we don't
    // need to add a ref for every copy we do.
    this->trackResource(fActiveBlitCommandEncoder);
    return fActiveBlitCommandEncoder.get();
}

void MtlCommandBuffer::endBlitCommandEncoder() {
    if (fActiveBlitCommandEncoder) {
        fActiveBlitCommandEncoder->endEncoding();
        fActiveBlitCommandEncoder.reset();
    }
}

void MtlCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) {
    SkASSERT(fActiveRenderCommandEncoder);

    auto mtlPipeline = static_cast<const MtlGraphicsPipeline*>(graphicsPipeline);
    auto pipelineState = mtlPipeline->mtlPipelineState();
    fActiveRenderCommandEncoder->setRenderPipelineState(pipelineState);
    auto depthStencilState = mtlPipeline->mtlDepthStencilState();
    fActiveRenderCommandEncoder->setDepthStencilState(depthStencilState);
    uint32_t stencilRefValue = mtlPipeline->stencilReferenceValue();
    fActiveRenderCommandEncoder->setStencilReferenceValue(stencilRefValue);
}

void MtlCommandBuffer::bindUniformBuffer(const BindBufferInfo& info, UniformSlot slot) {
    SkASSERT(fActiveRenderCommandEncoder);

    id<MTLBuffer> mtlBuffer = info.fBuffer ?
            static_cast<const MtlBuffer*>(info.fBuffer)->mtlBuffer() : nullptr;

    unsigned int bufferIndex;
    switch(slot) {
        case UniformSlot::kRenderStep:
            bufferIndex = MtlGraphicsPipeline::kRenderStepUniformBufferIndex;
            break;
        case UniformSlot::kPaint:
            bufferIndex = MtlGraphicsPipeline::kPaintUniformBufferIndex;
            break;
    }

    fActiveRenderCommandEncoder->setVertexBuffer(mtlBuffer, info.fOffset, bufferIndex);
    fActiveRenderCommandEncoder->setFragmentBuffer(mtlBuffer, info.fOffset, bufferIndex);
}

void MtlCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices,
                                       const BindBufferInfo& instances,
                                       const BindBufferInfo& indices,
                                       const BindBufferInfo& indirect) {
    this->bindVertexBuffers(vertices.fBuffer,
                            vertices.fOffset,
                            instances.fBuffer,
                            instances.fOffset);
    this->bindIndexBuffer(indices.fBuffer, indices.fOffset);
    this->bindIndirectBuffer(indirect.fBuffer, indirect.fOffset);
}

void MtlCommandBuffer::bindVertexBuffers(const Buffer* vertexBuffer,
                                         size_t vertexOffset,
                                         const Buffer* instanceBuffer,
                                         size_t instanceOffset) {
    SkASSERT(fActiveRenderCommandEncoder);

    if (vertexBuffer) {
        id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(vertexBuffer)->mtlBuffer();
        // Metal requires buffer offsets to be aligned to the data type, which is at most
        // 4 bytes since we use [[attribute]] to automatically unpack float components into
        // SIMD arrays.
        SkASSERT((vertexOffset & 0b11) == 0);
        fActiveRenderCommandEncoder->setVertexBuffer(mtlBuffer, vertexOffset,
                                                     MtlGraphicsPipeline::kVertexBufferIndex);
    }
    if (instanceBuffer) {
        id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(instanceBuffer)->mtlBuffer();
        SkASSERT((instanceOffset & 0b11) == 0);
        fActiveRenderCommandEncoder->setVertexBuffer(mtlBuffer, instanceOffset,
                                                     MtlGraphicsPipeline::kInstanceBufferIndex);
    }
}
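
// The (offset & 0b11) == 0 asserts above are a 4-byte alignment check, equivalent
// to SkIsAlign4(offset). For example:
//
//   (12 & 0b11) == 0   // aligned: offset 12 is a multiple of 4
//   ( 6 & 0b11) == 2   // misaligned: offset 6 would trip the assert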

void MtlCommandBuffer::bindIndexBuffer(const Buffer* indexBuffer, size_t offset) {
    if (indexBuffer) {
        fCurrentIndexBuffer = static_cast<const MtlBuffer*>(indexBuffer)->mtlBuffer();
        fCurrentIndexBufferOffset = offset;
    } else {
        fCurrentIndexBuffer = nil;
        fCurrentIndexBufferOffset = 0;
    }
}

void MtlCommandBuffer::bindIndirectBuffer(const Buffer* indirectBuffer, size_t offset) {
    if (indirectBuffer) {
        fCurrentIndirectBuffer = static_cast<const MtlBuffer*>(indirectBuffer)->mtlBuffer();
        fCurrentIndirectBufferOffset = offset;
    } else {
        fCurrentIndirectBuffer = nil;
        fCurrentIndirectBufferOffset = 0;
    }
}

void MtlCommandBuffer::bindTextureAndSampler(const Texture* texture,
                                             const Sampler* sampler,
                                             unsigned int bindIndex) {
    SkASSERT(texture && sampler);
    SkASSERT(fActiveRenderCommandEncoder);

    id<MTLTexture> mtlTexture = ((const MtlTexture*)texture)->mtlTexture();
    id<MTLSamplerState> mtlSamplerState = ((const MtlSampler*)sampler)->mtlSamplerState();
    fActiveRenderCommandEncoder->setFragmentTexture(mtlTexture, bindIndex);
    fActiveRenderCommandEncoder->setFragmentSamplerState(mtlSamplerState, bindIndex);
}

void MtlCommandBuffer::setScissor(unsigned int left, unsigned int top,
                                  unsigned int width, unsigned int height) {
    SkASSERT(fActiveRenderCommandEncoder);
    SkIRect scissor = SkIRect::MakeXYWH(
            left + fReplayTranslation.x(), top + fReplayTranslation.y(), width, height);
    fDrawIsOffscreen = !scissor.intersect(SkIRect::MakeSize(fRenderPassSize));
    if (fDrawIsOffscreen) {
        scissor.setEmpty();
    }

    fActiveRenderCommandEncoder->setScissorRect({
            static_cast<unsigned int>(scissor.x()),
            static_cast<unsigned int>(scissor.y()),
            static_cast<unsigned int>(scissor.width()),
            static_cast<unsigned int>(scissor.height()),
    });
}

void MtlCommandBuffer::setViewport(float x, float y, float width, float height,
                                   float minDepth, float maxDepth) {
    SkASSERT(fActiveRenderCommandEncoder);
    MTLViewport viewport = {x + fReplayTranslation.x(),
                            y + fReplayTranslation.y(),
                            width,
                            height,
                            minDepth,
                            maxDepth};
    fActiveRenderCommandEncoder->setViewport(viewport);

    float invTwoW = 2.f / width;
    float invTwoH = 2.f / height;
    // Metal's framebuffer space has (0, 0) at the top left. This agrees with Skia's device
    // coords. However, in NDC (-1, -1) is the bottom left. So we flip the origin here
    // (assuming all surfaces we have are TopLeft origin).
    float rtAdjust[4] = {invTwoW, -invTwoH, -1.f - x * invTwoW, 1.f + y * invTwoH};
    fActiveRenderCommandEncoder->setVertexBytes(rtAdjust, 4 * sizeof(float),
                                                MtlGraphicsPipeline::kIntrinsicUniformBufferIndex);
}
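
// Worked derivation of rtAdjust above (added note): for a viewport at (x, y) with
// size (width, height), a device-space position d maps to Metal NDC as
//
//   ndc_x = 2*(d_x - x)/width - 1  = d_x *  invTwoW + (-1.f - x * invTwoW)
//   ndc_y = 1 - 2*(d_y - y)/height = d_y * -invTwoH + ( 1.f + y * invTwoH)
//
// i.e. ndc = d * rtAdjust.xy + rtAdjust.zw. The negated y scale performs the flip
// from Skia's top-left device origin to NDC's bottom-left origin.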

void MtlCommandBuffer::setBlendConstants(float* blendConstants) {
    SkASSERT(fActiveRenderCommandEncoder);

    fActiveRenderCommandEncoder->setBlendColor(blendConstants);
}

static MTLPrimitiveType graphite_to_mtl_primitive(PrimitiveType primitiveType) {
    const static MTLPrimitiveType mtlPrimitiveType[] {
        MTLPrimitiveTypeTriangle,
        MTLPrimitiveTypeTriangleStrip,
        MTLPrimitiveTypePoint,
    };
    static_assert((int)PrimitiveType::kTriangles == 0);
    static_assert((int)PrimitiveType::kTriangleStrip == 1);
    static_assert((int)PrimitiveType::kPoints == 2);

    SkASSERT(primitiveType <= PrimitiveType::kPoints);
    return mtlPrimitiveType[static_cast<int>(primitiveType)];
}

void MtlCommandBuffer::draw(PrimitiveType type,
                            unsigned int baseVertex,
                            unsigned int vertexCount) {
    SkASSERT(fActiveRenderCommandEncoder);

    auto mtlPrimitiveType = graphite_to_mtl_primitive(type);

    fActiveRenderCommandEncoder->drawPrimitives(mtlPrimitiveType, baseVertex, vertexCount);
}

void MtlCommandBuffer::drawIndexed(PrimitiveType type, unsigned int baseIndex,
                                   unsigned int indexCount, unsigned int baseVertex) {
    SkASSERT(fActiveRenderCommandEncoder);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        size_t indexOffset = fCurrentIndexBufferOffset + sizeof(uint16_t) * baseIndex;
        // Use the "instance" variant with a count of 1 so that we can pass in a base vertex
        // instead of rebinding a vertex buffer offset.
        fActiveRenderCommandEncoder->drawIndexedPrimitives(mtlPrimitiveType, indexCount,
                                                           MTLIndexTypeUInt16, fCurrentIndexBuffer,
                                                           indexOffset, 1, baseVertex, 0);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}
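
// Why the instanced variant: Metal's plain
// drawIndexedPrimitives:indexCount:indexType:indexBuffer:indexBufferOffset: has no
// baseVertex parameter; baseVertex/baseInstance are only available on the instanced
// overload (macOS 10.11+ / iOS 9+), hence the instanceCount of 1 above.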

void MtlCommandBuffer::drawInstanced(PrimitiveType type, unsigned int baseVertex,
                                     unsigned int vertexCount, unsigned int baseInstance,
                                     unsigned int instanceCount) {
    SkASSERT(fActiveRenderCommandEncoder);

    auto mtlPrimitiveType = graphite_to_mtl_primitive(type);

    // This ordering is correct: Metal expects (vertexStart, vertexCount, instanceCount,
    // baseInstance), so instanceCount and baseInstance are intentionally swapped relative
    // to this method's parameter order.
    fActiveRenderCommandEncoder->drawPrimitives(mtlPrimitiveType, baseVertex, vertexCount,
                                                instanceCount, baseInstance);
}

void MtlCommandBuffer::drawIndexedInstanced(PrimitiveType type,
                                            unsigned int baseIndex,
                                            unsigned int indexCount,
                                            unsigned int baseVertex,
                                            unsigned int baseInstance,
                                            unsigned int instanceCount) {
    SkASSERT(fActiveRenderCommandEncoder);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        size_t indexOffset = fCurrentIndexBufferOffset + sizeof(uint16_t) * baseIndex;
        fActiveRenderCommandEncoder->drawIndexedPrimitives(mtlPrimitiveType, indexCount,
                                                           MTLIndexTypeUInt16, fCurrentIndexBuffer,
                                                           indexOffset, instanceCount,
                                                           baseVertex, baseInstance);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::drawIndirect(PrimitiveType type) {
    SkASSERT(fActiveRenderCommandEncoder);
    SkASSERT(fCurrentIndirectBuffer);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        fActiveRenderCommandEncoder->drawPrimitives(
                mtlPrimitiveType, fCurrentIndirectBuffer, fCurrentIndirectBufferOffset);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::drawIndexedIndirect(PrimitiveType type) {
    SkASSERT(fActiveRenderCommandEncoder);
    SkASSERT(fCurrentIndirectBuffer);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        fActiveRenderCommandEncoder->drawIndexedPrimitives(mtlPrimitiveType,
                                                           MTLIndexTypeUInt32,
                                                           fCurrentIndexBuffer,
                                                           fCurrentIndexBufferOffset,
                                                           fCurrentIndirectBuffer,
                                                           fCurrentIndirectBufferOffset);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::beginComputePass() {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();
    fActiveComputeCommandEncoder = MtlComputeCommandEncoder::Make(fSharedContext,
                                                                  fCommandBuffer.get());
}

void MtlCommandBuffer::bindComputePipeline(const ComputePipeline* computePipeline) {
    SkASSERT(fActiveComputeCommandEncoder);

    auto mtlPipeline = static_cast<const MtlComputePipeline*>(computePipeline);
    fActiveComputeCommandEncoder->setComputePipelineState(mtlPipeline->mtlPipelineState());
}

void MtlCommandBuffer::bindBuffer(const Buffer* buffer, unsigned int offset, unsigned int index) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlBuffer = buffer ? static_cast<const MtlBuffer*>(buffer)->mtlBuffer() : nil;
    fActiveComputeCommandEncoder->setBuffer(mtlBuffer, offset, index);
}

void MtlCommandBuffer::bindTexture(const Texture* texture, unsigned int index) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLTexture> mtlTexture =
            texture ? static_cast<const MtlTexture*>(texture)->mtlTexture() : nil;
    fActiveComputeCommandEncoder->setTexture(mtlTexture, index);
}

void MtlCommandBuffer::bindSampler(const Sampler* sampler, unsigned int index) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLSamplerState> mtlSamplerState =
            sampler ? static_cast<const MtlSampler*>(sampler)->mtlSamplerState() : nil;
    fActiveComputeCommandEncoder->setSamplerState(mtlSamplerState, index);
}

void MtlCommandBuffer::dispatchThreadgroups(const WorkgroupSize& globalSize,
                                            const WorkgroupSize& localSize) {
    SkASSERT(fActiveComputeCommandEncoder);
    fActiveComputeCommandEncoder->dispatchThreadgroups(globalSize, localSize);
}

void MtlCommandBuffer::dispatchThreadgroupsIndirect(const WorkgroupSize& localSize,
                                                    const Buffer* indirectBuffer,
                                                    size_t indirectBufferOffset) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlIndirectBuffer = static_cast<const MtlBuffer*>(indirectBuffer)->mtlBuffer();
    fActiveComputeCommandEncoder->dispatchThreadgroupsWithIndirectBuffer(
            mtlIndirectBuffer, indirectBufferOffset, localSize);
}

void MtlCommandBuffer::endComputePass() {
    SkASSERT(fActiveComputeCommandEncoder);
    fActiveComputeCommandEncoder->endEncoding();
    fActiveComputeCommandEncoder.reset();
}

static bool check_max_blit_width(int widthInPixels) {
    if (widthInPixels > 32767) {
        SkASSERT(false); // surfaces should not be this wide anyway
        return false;
    }
    return true;
}
783
785 size_t srcOffset,
786 const Buffer* dstBuffer,
787 size_t dstOffset,
788 size_t size) {
789 SkASSERT(!fActiveRenderCommandEncoder);
790 SkASSERT(!fActiveComputeCommandEncoder);
791
792 id<MTLBuffer> mtlSrcBuffer = static_cast<const MtlBuffer*>(srcBuffer)->mtlBuffer();
793 id<MTLBuffer> mtlDstBuffer = static_cast<const MtlBuffer*>(dstBuffer)->mtlBuffer();
794
795 MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
796 if (!blitCmdEncoder) {
797 return false;
798 }
799
800#ifdef SK_ENABLE_MTL_DEBUG_INFO
801 blitCmdEncoder->pushDebugGroup(@"copyBufferToBuffer");
802#endif
803 blitCmdEncoder->copyBufferToBuffer(mtlSrcBuffer, srcOffset, mtlDstBuffer, dstOffset, size);
804#ifdef SK_ENABLE_MTL_DEBUG_INFO
805 blitCmdEncoder->popDebugGroup();
806#endif
807 return true;
808}
809
811 SkIRect srcRect,
812 const Buffer* buffer,
813 size_t bufferOffset,
814 size_t bufferRowBytes) {
815 SkASSERT(!fActiveRenderCommandEncoder);
816 SkASSERT(!fActiveComputeCommandEncoder);
817
818 if (!check_max_blit_width(srcRect.width())) {
819 return false;
820 }
821
822 id<MTLTexture> mtlTexture = static_cast<const MtlTexture*>(texture)->mtlTexture();
823 id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
824
825 MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
826 if (!blitCmdEncoder) {
827 return false;
828 }
829
830#ifdef SK_ENABLE_MTL_DEBUG_INFO
831 blitCmdEncoder->pushDebugGroup(@"copyTextureToBuffer");
832#endif
833 blitCmdEncoder->copyFromTexture(mtlTexture, srcRect, mtlBuffer, bufferOffset, bufferRowBytes);
834#ifdef SK_ENABLE_MTL_DEBUG_INFO
835 blitCmdEncoder->popDebugGroup();
836#endif
837 return true;
838}

bool MtlCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
                                             const Texture* texture,
                                             const BufferTextureCopyData* copyData,
                                             int count) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
    id<MTLTexture> mtlTexture = static_cast<const MtlTexture*>(texture)->mtlTexture();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyBufferToTexture");
#endif
    for (int i = 0; i < count; ++i) {
        if (!check_max_blit_width(copyData[i].fRect.width())) {
            return false;
        }

        blitCmdEncoder->copyFromBuffer(mtlBuffer,
                                       copyData[i].fBufferOffset,
                                       copyData[i].fBufferRowBytes,
                                       mtlTexture,
                                       copyData[i].fRect,
                                       copyData[i].fMipLevel);
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onCopyTextureToTexture(const Texture* src,
                                              SkIRect srcRect,
                                              const Texture* dst,
                                              SkIPoint dstPoint,
                                              int mipLevel) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLTexture> srcMtlTexture = static_cast<const MtlTexture*>(src)->mtlTexture();
    id<MTLTexture> dstMtlTexture = static_cast<const MtlTexture*>(dst)->mtlTexture();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyTextureToTexture");
#endif

    blitCmdEncoder->copyTextureToTexture(srcMtlTexture, srcRect, dstMtlTexture, dstPoint,
                                         mipLevel);

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) {
#ifdef SK_BUILD_FOR_MAC
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
    if ([mtlBuffer storageMode] != MTLStorageModeManaged) {
        *outDidResultInWork = false;
        return true;
    }

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"synchronizeToCpu");
#endif
    blitCmdEncoder->synchronizeResource(mtlBuffer);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif

    *outDidResultInWork = true;
    return true;
#else  // SK_BUILD_FOR_MAC
    // Explicit synchronization is never necessary on builds that are not macOS since we never
    // use discrete GPUs with managed mode buffers outside of macOS.
    *outDidResultInWork = false;
    return true;
#endif  // SK_BUILD_FOR_MAC
}
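
// Usage sketch (hypothetical caller, not part of this file): for an
// MTLStorageModeManaged buffer on macOS, GPU writes become visible to the CPU only
// after the synchronizeResource blit above has actually executed:
//
//   bool didWork = false;
//   cmdBuffer->onSynchronizeBufferToCpu(buffer, &didWork);
//   // ... commit the command buffer and wait for it to complete ...
//   // only now is it safe to read the GPU's writes via [mtlBuffer contents]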

bool MtlCommandBuffer::onClearBuffer(const Buffer* buffer, size_t offset, size_t size) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
    blitCmdEncoder->fillBuffer(mtlBuffer, offset, size, 0);

    return true;
}

} // namespace skgpu::graphite