Flutter Engine
The Flutter Engine
GrMtlOpsRenderPass.mm
Go to the documentation of this file.
1/*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
9
20
21#if !__has_feature(objc_arc)
22#error This file must be compiled with Arc. Use -fobjc-arc flag
23#endif
24
25GR_NORETAIN_BEGIN
26
// Constructor (fragment): takes ownership of the framebuffer and immediately
// builds the MTLRenderPassDescriptor via setupRenderPass().
// NOTE(review): extraction artifact — the opening signature line (27) and the
// stencilInfo parameter line (30) are missing; per the declaration this takes
// (GrMtlGpu*, GrRenderTarget*, sk_sp<GrMtlFramebuffer>, GrSurfaceOrigin,
// colorInfo, stencilInfo) — confirm against the header.
 28 sk_sp<GrMtlFramebuffer> framebuffer, GrSurfaceOrigin origin,
 29 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
 31 : INHERITED(rt, origin)
 32 , fGpu(gpu)
 33 , fFramebuffer(std::move(framebuffer)) {
 34 this->setupRenderPass(colorInfo, stencilInfo);
 35}
36
38}
39
// End-of-pass handler (fragment): no-ops if there is no framebuffer, rounds
// the accumulated float draw bounds out to integer bounds, then drops the
// active encoder.
// NOTE(review): extraction artifact — the signature line (40) and line 46 are
// missing; 46 presumably submits iBounds to the GPU (cf. the
// submitIndirectCommandBuffer declaration) — confirm against upstream.
 41 if (!fFramebuffer) {
 42 return;
 43 }
 44 SkIRect iBounds;
 45 fBounds.roundOut(&iBounds);
 47 fActiveRenderCmdEncoder = nullptr;
 48}
49
50static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType) {
51 const static MTLPrimitiveType mtlPrimitiveType[] {
52 MTLPrimitiveTypeTriangle,
53 MTLPrimitiveTypeTriangleStrip,
54 MTLPrimitiveTypePoint,
55 MTLPrimitiveTypeLine,
56 MTLPrimitiveTypeLineStrip
57 };
58 static_assert((int)GrPrimitiveType::kTriangles == 0);
59 static_assert((int)GrPrimitiveType::kTriangleStrip == 1);
60 static_assert((int)GrPrimitiveType::kPoints == 2);
61 static_assert((int)GrPrimitiveType::kLines == 3);
62 static_assert((int)GrPrimitiveType::kLineStrip == 4);
63
64 SkASSERT(primitiveType <= GrPrimitiveType::kLineStrip);
65 return mtlPrimitiveType[static_cast<int>(primitiveType)];
66}
67
// Builds (or fetches from cache) the Metal pipeline state for programInfo,
// binds it on the active render command encoder, and records fixed-function
// state (fill mode, scissor, primitive type). Returns false if no valid
// program descriptor or pipeline state could be produced.
// NOTE(review): extraction artifact — original lines 72, 91, and 115 are
// missing: the tail of the makeDesc() call, the opener of the call completed
// on line 92 (presumably tracking the color attachment on the command
// buffer), and the opener of the scissor call completed on 116-118 —
// confirm against upstream.
 68bool GrMtlOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo,
 69 const SkRect& drawBounds) {
 70 const GrMtlCaps& caps = fGpu->mtlCaps();
 71 GrProgramDesc programDesc = caps.makeDesc(fRenderTarget, programInfo,
 73 if (!programDesc.isValid()) {
 74 return false;
 75 }
 76
 77 fActivePipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
 78 programDesc, programInfo);
 79 if (!fActivePipelineState) {
 80 return false;
 81 }
 82
 83 fActivePipelineState->setData(fFramebuffer.get(), programInfo);
// Cached so onDrawIndexed can convert baseVertex into a byte offset.
 84 fCurrentVertexStride = programInfo.geomProc().vertexStride();
 85
// Lazily create the encoder on first bind (see setupRenderPass for why
// creation is deferred).
 86 if (!fActiveRenderCmdEncoder) {
 87 this->setupRenderCommandEncoder(fActivePipelineState);
 88 if (!fActiveRenderCmdEncoder) {
 89 return false;
 90 }
 92 sk_ref_sp<GrMtlAttachment>(fFramebuffer->colorAttachment()));
 93 }
 94
 95 fActiveRenderCmdEncoder->setRenderPipelineState(
 96 fActivePipelineState->pipeline()->mtlPipelineState());
 97#ifdef SK_ENABLE_MTL_DEBUG_INFO
// Open a "bindAndDraw" group; the matching pop happens at the end of the
// next onDraw* call.
 98 if (!fDebugGroupActive) {
 99 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
 100 fDebugGroupActive = true;
 101 }
 102#endif
 103 fActivePipelineState->setDrawState(fActiveRenderCmdEncoder,
 104 programInfo.pipeline().writeSwizzle(),
 105 programInfo.pipeline().getXferProcessor());
 106 if (this->gpu()->caps()->wireframeMode() || programInfo.pipeline().isWireframe()) {
 107 fActiveRenderCmdEncoder->setTriangleFillMode(MTLTriangleFillModeLines);
 108 } else {
 109 fActiveRenderCmdEncoder->setTriangleFillMode(MTLTriangleFillModeFill);
 110 }
 111
 112 if (!programInfo.pipeline().isScissorTestEnabled()) {
 113 // "Disable" scissor by setting it to the full pipeline bounds.
 114 SkISize dimensions = fFramebuffer->colorAttachment()->dimensions();
 116 dimensions, fOrigin,
 117 SkIRect::MakeWH(dimensions.width(),
 118 dimensions.height()));
 119 }
 120
// Cache the primitive type for subsequent draw calls and grow the pass's
// dirty-bounds to cover this draw.
 121 fActivePrimitiveType = gr_to_mtl_primitive(programInfo.primitiveType());
 122 fBounds.join(drawBounds);
 123 return true;
 124}
125
// Applies a dynamic scissor rect on the active encoder; requires a bound
// pipeline (onBindPipeline must have succeeded first).
// NOTE(review): extraction artifact — line 129 (the opener of the call
// completed on 130-131, presumably
// GrMtlPipelineState::SetDynamicScissorRectState) is missing — confirm
// against upstream.
 126void GrMtlOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
 127 SkASSERT(fActivePipelineState);
 128 SkASSERT(fActiveRenderCmdEncoder);
 130 fFramebuffer->colorAttachment()->dimensions(),
 131 fOrigin, scissor);
 132}
133
// Records the geometry-processor and pipeline textures on the active
// pipeline state, then binds them on the active encoder. Always succeeds
// once a pipeline is bound.
 134bool GrMtlOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
 135 const GrSurfaceProxy* const geomProcTextures[],
 136 const GrPipeline& pipeline) {
 137 SkASSERT(fActivePipelineState);
 138 SkASSERT(fActiveRenderCmdEncoder);
 139#ifdef SK_ENABLE_MTL_DEBUG_INFO
// Reuse (or open) the "bindAndDraw" group; popped at the end of onDraw*.
 140 if (!fDebugGroupActive) {
 141 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
 142 fDebugGroupActive = true;
 143 }
 144#endif
 145 fActivePipelineState->setTextures(geomProc, pipeline, geomProcTextures);
 146 fActivePipelineState->bindTextures(fActiveRenderCmdEncoder);
 147 return true;
 148}
149
// Full-target color clear, implemented by rewriting the render pass
// descriptor's color attachment to MTLLoadActionClear and starting a new
// encoder (setupResolve() may create one itself when MSAA-load is needed).
 150void GrMtlOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
 151 // Partial clears are not supported
 152 SkASSERT(!scissor.enabled());
 153
 154 // Ideally we should never end up here since all clears should either be done as draws or
 155 // load ops in metal. However, if a client inserts a wait op we need to handle it.
 156 auto colorAttachment = fRenderPassDesc.colorAttachments[0];
 157 colorAttachment.clearColor = MTLClearColorMake(color[0], color[1], color[2], color[3]);
 158 colorAttachment.loadAction = MTLLoadActionClear;
 159 if (!this->setupResolve()) {
 160 this->setupRenderCommandEncoder(nullptr);
 161 }
 162}
163
// Full-target stencil clear via the render pass descriptor: sets the clear
// value (top bit for "inside mask", 0 otherwise), switches the stencil load
// action to Clear, and starts a new encoder.
 164void GrMtlOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
 165 // Partial clears are not supported
 166 SkASSERT(!scissor.enabled());
 167
 168 GrAttachment* sb = fFramebuffer->stencilAttachment();
 169 // this should only be called internally when we know we have a
 170 // stencil buffer.
 171 SkASSERT(sb);
 172 int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());
 173
 174 // The contract with the callers does not guarantee that we preserve all bits in the stencil
 175 // during this clear. Thus we will clear the entire stencil to the desired value.
 176 auto stencilAttachment = fRenderPassDesc.stencilAttachment;
 177 if (insideStencilMask) {
// Set only the top stencil bit (e.g. 0x80 for an 8-bit buffer).
 178 stencilAttachment.clearStencil = (1 << (stencilBitCount - 1));
 179 } else {
 180 stencilAttachment.clearStencil = 0;
 181 }
 182
 183 stencilAttachment.loadAction = MTLLoadActionClear;
 184 if (!this->setupResolve()) {
 185 this->setupRenderCommandEncoder(nullptr);
 186 }
 187}
188
// inlineUpload (fragment): performs the deferred texture upload, then
// restores an active encoder since the upload interrupted the pass.
// NOTE(review): extraction artifact — the signature line (189) is missing;
// the declaration is inlineUpload(GrOpFlushState*, GrDeferredTextureUploadFn&).
 190 state->doUpload(upload);
 191
 192 // If the previous renderCommandEncoder did a resolve without an MSAA store
 193 // (e.g., if the color attachment is memoryless) we need to copy the contents of
 194 // the resolve attachment to the MSAA attachment at this point.
 195 if (!this->setupResolve()) {
 196 // If setting up for the resolve didn't create an encoder, it's probably reasonable to
 197 // create a new encoder at this point, though maybe not necessary.
 198 this->setupRenderCommandEncoder(nullptr);
 199 }
 200}
201
// initRenderState (fragment): seeds a freshly created encoder with the
// default fixed-function state (CCW front-face winding, full-size viewport).
// NOTE(review): extraction artifact — the signature line (202) is missing;
// the declaration is initRenderState(GrMtlRenderCommandEncoder*).
 203 if (!encoder) {
 204 return;
 205 }
 206#ifdef SK_ENABLE_MTL_DEBUG_INFO
 207 encoder->pushDebugGroup(@"initRenderState");
 208#endif
 209 encoder->setFrontFacingWinding(MTLWindingCounterClockwise);
 210 SkISize colorAttachmentDimensions = fFramebuffer->colorAttachment()->dimensions();
 211 // Strictly speaking we shouldn't have to set this, as the default viewport is the size of
 212 // the drawable used to generate the renderCommandEncoder -- but just in case.
 213 MTLViewport viewport = { 0.0, 0.0,
 214 (double) colorAttachmentDimensions.width(),
 215 (double) colorAttachmentDimensions.height(),
 216 0.0, 1.0 };
 217 encoder->setViewport(viewport);
 218#ifdef SK_ENABLE_MTL_DEBUG_INFO
 219 encoder->popDebugGroup();
 220#endif
 221}
222
// Builds fRenderPassDesc from the framebuffer's attachments and the
// requested load/store ops, then either starts the MSAA-resolve path or —
// only when a clear is requested — creates the first encoder eagerly.
// NOTE(review): extraction artifact — line 243 is missing; by symmetry with
// line 244 it is presumably SkASSERT(colorInfo.fStoreOp <= GrStoreOp::kDiscard)
// — confirm against upstream.
 223void GrMtlOpsRenderPass::setupRenderPass(
 224 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
 225 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
// Tables indexed by GrLoadOp/GrStoreOp; the static_asserts pin the order.
 226 const static MTLLoadAction mtlLoadAction[] {
 227 MTLLoadActionLoad,
 228 MTLLoadActionClear,
 229 MTLLoadActionDontCare
 230 };
 231 static_assert((int)GrLoadOp::kLoad == 0);
 232 static_assert((int)GrLoadOp::kClear == 1);
 233 static_assert((int)GrLoadOp::kDiscard == 2);
 234 SkASSERT(colorInfo.fLoadOp <= GrLoadOp::kDiscard);
 235 SkASSERT(stencilInfo.fLoadOp <= GrLoadOp::kDiscard);
 236
 237 const static MTLStoreAction mtlStoreAction[] {
 238 MTLStoreActionStore,
 239 MTLStoreActionDontCare
 240 };
 241 static_assert((int)GrStoreOp::kStore == 0);
 242 static_assert((int)GrStoreOp::kDiscard == 1);
 244 SkASSERT(stencilInfo.fStoreOp <= GrStoreOp::kDiscard);
 245
 246 fRenderPassDesc = [MTLRenderPassDescriptor new];
 247 auto colorAttachment = fRenderPassDesc.colorAttachments[0];
 248 auto color = fFramebuffer->colorAttachment();
 249 colorAttachment.texture = color->mtlTexture();
 250 const std::array<float, 4>& clearColor = colorInfo.fClearColor;
 251 colorAttachment.clearColor =
 252 MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
 253 colorAttachment.loadAction = mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
 254 colorAttachment.storeAction = mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];
 255
// Stencil is optional; the descriptor's texture is left nil without one.
 256 auto stencil = fFramebuffer->stencilAttachment();
 257 auto mtlStencil = fRenderPassDesc.stencilAttachment;
 258 if (stencil) {
 259 mtlStencil.texture = stencil->mtlTexture();
 260 }
 261 mtlStencil.clearStencil = 0;
 262 mtlStencil.loadAction = mtlLoadAction[static_cast<int>(stencilInfo.fLoadOp)];
 263 mtlStencil.storeAction = mtlStoreAction[static_cast<int>(stencilInfo.fStoreOp)];
 264
 265 if (!this->setupResolve()) {
 266 // Manage initial clears
 267 if (colorInfo.fLoadOp == GrLoadOp::kClear || stencilInfo.fLoadOp == GrLoadOp::kClear) {
// A clear must take effect even with zero draws, so the pass bounds
// cover the whole attachment and an encoder is created right away.
 268 fBounds = SkRect::MakeWH(color->dimensions().width(),
 269 color->dimensions().height());
 270 this->setupRenderCommandEncoder(nullptr);
 271 } else {
 272 fBounds.setEmpty();
 273 // For now, we lazily create the renderCommandEncoder because we may have no draws,
 274 // and an empty renderCommandEncoder can still produce output. This can cause issues
 275 // when we clear a texture upon creation -- we'll subsequently discard the contents.
 276 // This can be removed when that ordering is fixed.
 277 }
 278 }
 279}
280
281bool GrMtlOpsRenderPass::setupResolve() {
282 fActiveRenderCmdEncoder = nullptr;
283 auto resolve = fFramebuffer->resolveAttachment();
284 if (resolve) {
285 auto colorAttachment = fRenderPassDesc.colorAttachments[0];
286 colorAttachment.resolveTexture = resolve->mtlTexture();
287 // TODO: For framebufferOnly attachments we should do StoreAndMultisampleResolve if
288 // the storeAction is Store. But for the moment they don't take this path.
289 colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
290 if (colorAttachment.loadAction == MTLLoadActionLoad) {
291 auto color = fFramebuffer->colorAttachment();
292 auto dimensions = color->dimensions();
293 // for now use the full bounds
294 auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
295 fOrigin, dimensions.height(), SkIRect::MakeSize(dimensions));
296 fActiveRenderCmdEncoder =
297 fGpu->loadMSAAFromResolve(color, resolve, nativeBounds,
298 fRenderPassDesc.stencilAttachment);
299 }
300 }
301
302 return (fActiveRenderCmdEncoder != nullptr);
303}
304
305void GrMtlOpsRenderPass::setupRenderCommandEncoder(GrMtlPipelineState* pipelineState) {
306 fActiveRenderCmdEncoder =
307 fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, pipelineState, this);
308 // Any future RenderCommandEncoders we create for this OpsRenderPass should load,
309 // unless onClear or onClearStencilClip are explicitly called.
310 auto colorAttachment = fRenderPassDesc.colorAttachments[0];
311 colorAttachment.loadAction = MTLLoadActionLoad;
312 auto stencilAttachment = fRenderPassDesc.stencilAttachment;
313 stencilAttachment.loadAction = MTLLoadActionLoad;
314}
315
// Takes ownership of the index/instance/vertex buffers for subsequent
// draws. The instance buffer is bound on the encoder immediately; the
// vertex buffer binding is deferred to each onDraw* (which may apply a
// baseVertex offset), and the index buffer is only referenced at draw time.
// NOTE(review): extraction artifact — lines 332, 340, and 346 are missing;
// they presumably bind the vertex buffer on the encoder and retain the
// buffers on the command buffer (cf. addGrBuffer) — confirm upstream.
 316void GrMtlOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
 317 sk_sp<const GrBuffer> instanceBuffer,
 318 sk_sp<const GrBuffer> vertexBuffer,
 319 GrPrimitiveRestart primRestart) {
 320#ifdef SK_ENABLE_MTL_DEBUG_INFO
 321 if (!fDebugGroupActive) {
 322 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
 323 fDebugGroupActive = true;
 324 }
 325#endif
// Metal has no primitive-restart support here.
 326 SkASSERT(GrPrimitiveRestart::kNo == primRestart);
 327 int inputBufferIndex = 0;
 328 if (vertexBuffer) {
 329 SkASSERT(!vertexBuffer->isCpuBuffer());
 330 SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer.get())->isMapped());
 331 fActiveVertexBuffer = std::move(vertexBuffer);
 333 ++inputBufferIndex;
 334 }
 335 if (instanceBuffer) {
 336 SkASSERT(!instanceBuffer->isCpuBuffer());
 337 SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer.get())->isMapped());
 338 this->setVertexBuffer(fActiveRenderCmdEncoder, instanceBuffer.get(), 0, inputBufferIndex++);
 339 fActiveInstanceBuffer = std::move(instanceBuffer);
 341 }
 342 if (indexBuffer) {
 343 SkASSERT(!indexBuffer->isCpuBuffer());
 344 SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer.get())->isMapped());
 345 fActiveIndexBuffer = std::move(indexBuffer);
 347 }
 348}
349
// Non-indexed, non-instanced draw using the cached primitive type from
// onBindPipeline().
 350void GrMtlOpsRenderPass::onDraw(int vertexCount, int baseVertex) {
 351 SkASSERT(fActivePipelineState);
 352 SkASSERT(nil != fActiveRenderCmdEncoder);
 353#ifdef SK_ENABLE_MTL_DEBUG_INFO
 354 if (!fDebugGroupActive) {
 355 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
 356 fDebugGroupActive = true;
 357 }
 358#endif
// Vertex data at input-buffer slot 0 with no byte offset; baseVertex is
// handed to drawPrimitives instead.
 359 this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
 360
 361 fActiveRenderCmdEncoder->drawPrimitives(fActivePrimitiveType, baseVertex, vertexCount);
 362 fGpu->stats()->incNumDraws();
 363#ifdef SK_ENABLE_MTL_DEBUG_INFO
// Close the "bindAndDraw" group opened by the preceding bind calls.
 364 SkASSERT(fDebugGroupActive);
 365 fActiveRenderCmdEncoder->popDebugGroup();
 366 fDebugGroupActive = false;
 367#endif
 368}
369
// Indexed draw with 16-bit indices. minIndexValue/maxIndexValue are unused
// on Metal. baseVertex is applied as a byte offset into the vertex buffer
// (stride * baseVertex) because this drawIndexedPrimitives overload has no
// base-vertex parameter.
// NOTE(review): extraction artifact — line 374 is missing (between the two
// assert/ifdef lines); content unknown — confirm against upstream.
 370void GrMtlOpsRenderPass::onDrawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue,
 371 uint16_t maxIndexValue, int baseVertex) {
 372 SkASSERT(fActivePipelineState);
 373 SkASSERT(nil != fActiveRenderCmdEncoder);
 375#ifdef SK_ENABLE_MTL_DEBUG_INFO
 376 if (!fDebugGroupActive) {
 377 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
 378 fDebugGroupActive = true;
 379 }
 380#endif
 381 this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(),
 382 fCurrentVertexStride * baseVertex, 0);
 383
 384 auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
// baseIndex is converted into a byte offset into the 16-bit index buffer.
 385 size_t indexOffset = sizeof(uint16_t) * baseIndex;
 386 id<MTLBuffer> indexBuffer = mtlIndexBuffer->mtlBuffer();
 387 fActiveRenderCmdEncoder->drawIndexedPrimitives(fActivePrimitiveType, indexCount,
 388 MTLIndexTypeUInt16, indexBuffer, indexOffset);
 389 fGpu->stats()->incNumDraws();
 390#ifdef SK_ENABLE_MTL_DEBUG_INFO
 391 SkASSERT(fDebugGroupActive);
 392 fActiveRenderCmdEncoder->popDebugGroup();
 393 fDebugGroupActive = false;
 394#endif
 395}
396
// Instanced, non-indexed draw. The instanced drawPrimitives overload needs
// macOS 10.11 / iOS 9 / tvOS 9; older OSes assert (this path should be
// gated by caps before reaching here — confirm).
 397void GrMtlOpsRenderPass::onDrawInstanced(int instanceCount, int baseInstance, int vertexCount,
 398 int baseVertex) {
 399 SkASSERT(fActivePipelineState);
 400 SkASSERT(nil != fActiveRenderCmdEncoder);
 401#ifdef SK_ENABLE_MTL_DEBUG_INFO
 402 if (!fDebugGroupActive) {
 403 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
 404 fDebugGroupActive = true;
 405 }
 406#endif
 407 this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
 408
 409 if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
 410 fActiveRenderCmdEncoder->drawPrimitives(fActivePrimitiveType, baseVertex, vertexCount,
 411 instanceCount, baseInstance);
 412 } else {
 413 SkASSERT(false);
 414 }
 415 fGpu->stats()->incNumDraws();
 416#ifdef SK_ENABLE_MTL_DEBUG_INFO
 417 SkASSERT(fDebugGroupActive);
 418 fActiveRenderCmdEncoder->popDebugGroup();
 419 fDebugGroupActive = false;
 420#endif
 421}
422
// Indexed + instanced draw with 16-bit indices; unlike onDrawIndexed, this
// overload accepts baseVertex directly so the vertex buffer is bound at
// offset 0.
// NOTE(review): extraction artifact — line 427 is missing (between the two
// assert/ifdef lines); content unknown — confirm against upstream.
 423void GrMtlOpsRenderPass::onDrawIndexedInstanced(
 424 int indexCount, int baseIndex, int instanceCount, int baseInstance, int baseVertex) {
 425 SkASSERT(fActivePipelineState);
 426 SkASSERT(nil != fActiveRenderCmdEncoder);
 428#ifdef SK_ENABLE_MTL_DEBUG_INFO
 429 if (!fDebugGroupActive) {
 430 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
 431 fDebugGroupActive = true;
 432 }
 433#endif
 434 this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
 435
 436 auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
 437 size_t indexOffset = sizeof(uint16_t) * baseIndex;
 438 if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
 439 fActiveRenderCmdEncoder->drawIndexedPrimitives(fActivePrimitiveType, indexCount,
 440 MTLIndexTypeUInt16,
 441 mtlIndexBuffer->mtlBuffer(), indexOffset,
 442 instanceCount, baseVertex, baseInstance);
 443 } else {
 444 SkASSERT(false);
 445 }
 446 fGpu->stats()->incNumDraws();
 447#ifdef SK_ENABLE_MTL_DEBUG_INFO
 448 SkASSERT(fDebugGroupActive);
 449 fActiveRenderCmdEncoder->popDebugGroup();
 450 fDebugGroupActive = false;
 451#endif
 452}
453
// Multi-draw via an indirect-argument buffer: Metal's indirect
// drawPrimitives issues one draw per call, so loop drawCount times,
// advancing bufferOffset by one GrDrawIndirectCommand per iteration.
// NOTE(review): extraction artifact — line 457 is missing (before the first
// assert); possibly an assert on caps' nativeDrawIndirectSupport — confirm.
 454void GrMtlOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer,
 455 size_t bufferOffset,
 456 int drawCount) {
 458 SkASSERT(fActivePipelineState);
 459 SkASSERT(nil != fActiveRenderCmdEncoder);
 460#ifdef SK_ENABLE_MTL_DEBUG_INFO
 461 if (!fDebugGroupActive) {
 462 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
 463 fDebugGroupActive = true;
 464 }
 465#endif
 466 this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
 467
 468 auto mtlIndirectBuffer = static_cast<const GrMtlBuffer*>(drawIndirectBuffer);
 469 const size_t stride = sizeof(GrDrawIndirectCommand);
 470 while (drawCount >= 1) {
 471 if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
 472 fActiveRenderCmdEncoder->drawPrimitives(fActivePrimitiveType,
 473 mtlIndirectBuffer->mtlBuffer(), bufferOffset);
 474 } else {
 475 SkASSERT(false);
 476 }
 477 drawCount--;
 478 bufferOffset += stride;
 479 fGpu->stats()->incNumDraws();
 480 }
 481#ifdef SK_ENABLE_MTL_DEBUG_INFO
 482 SkASSERT(fDebugGroupActive);
 483 fActiveRenderCmdEncoder->popDebugGroup();
 484 fDebugGroupActive = false;
 485#endif
 486}
487
// Indexed multi-draw via an indirect-argument buffer; one Metal call per
// GrDrawIndexedIndirectCommand, always starting at index offset 0 (the
// per-draw base index lives inside the indirect command).
// NOTE(review): extraction artifact — lines 491 and 494 are missing
// (before/after the asserts); possibly caps assertions — confirm upstream.
 488void GrMtlOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer,
 489 size_t bufferOffset,
 490 int drawCount) {
 492 SkASSERT(fActivePipelineState);
 493 SkASSERT(nil != fActiveRenderCmdEncoder);
 495#ifdef SK_ENABLE_MTL_DEBUG_INFO
 496 if (!fDebugGroupActive) {
 497 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
 498 fDebugGroupActive = true;
 499 }
 500#endif
 501 this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
 502
 503 auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
 504 auto mtlIndirectBuffer = static_cast<const GrMtlBuffer*>(drawIndirectBuffer);
 505 size_t indexOffset = 0;
 506
 507 const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
 508 while (drawCount >= 1) {
 509 if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
 510 fActiveRenderCmdEncoder->drawIndexedPrimitives(fActivePrimitiveType,
 511 MTLIndexTypeUInt16,
 512 mtlIndexBuffer->mtlBuffer(),
 513 indexOffset,
 514 mtlIndirectBuffer->mtlBuffer(),
 515 bufferOffset);
 516 } else {
 517 SkASSERT(false);
 518 }
 519 drawCount--;
 520 bufferOffset += stride;
 521 fGpu->stats()->incNumDraws();
 522 }
 523#ifdef SK_ENABLE_MTL_DEBUG_INFO
 524 SkASSERT(fDebugGroupActive);
 525 fActiveRenderCmdEncoder->popDebugGroup();
 526 fDebugGroupActive = false;
 527#endif
 528}
529
530void GrMtlOpsRenderPass::setVertexBuffer(GrMtlRenderCommandEncoder* encoder,
531 const GrBuffer* buffer,
532 size_t vertexOffset,
533 size_t inputBufferIndex) {
534 if (!buffer) {
535 return;
536 }
537
538 constexpr static int kFirstBufferBindingIdx = GrMtlUniformHandler::kLastUniformBinding + 1;
539 int index = inputBufferIndex + kFirstBufferBindingIdx;
540 SkASSERT(index < 4);
541 auto mtlBuffer = static_cast<const GrMtlBuffer*>(buffer);
542 id<MTLBuffer> mtlVertexBuffer = mtlBuffer->mtlBuffer();
543 SkASSERT(mtlVertexBuffer);
544 size_t offset = vertexOffset;
545 encoder->setVertexBuffer(mtlVertexBuffer, offset, index);
546}
547
548GR_NORETAIN_END
int GrBackendFormatStencilBits(const GrBackendFormat &format)
std::function< void(GrDeferredTextureUploadWritePixelsFn &)> GrDeferredTextureUploadFn
static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType)
GrPrimitiveRestart
Definition: GrTypesPriv.h:56
GrPrimitiveType
Definition: GrTypesPriv.h:43
GrSurfaceOrigin
Definition: GrTypes.h:147
#define SkASSERT(cond)
Definition: SkAssert.h:116
virtual bool isCpuBuffer() const =0
bool nativeDrawIndirectSupport() const
Definition: GrCaps.h:84
size_t vertexStride() const
bool isMapped() const
Definition: GrGpuBuffer.cpp:47
void incNumDraws()
Definition: GrGpu.h:541
Stats * stats()
Definition: GrGpu.h:551
const GrCaps * caps() const
Definition: GrGpu.h:73
id< MTLBuffer > mtlBuffer() const
Definition: GrMtlBuffer.h:28
GrProgramDesc makeDesc(GrRenderTarget *, const GrProgramInfo &, ProgramDescOverrideFlags) const override
Definition: GrMtlCaps.mm:1214
GrMtlRenderCommandEncoder * getRenderCommandEncoder(MTLRenderPassDescriptor *, const GrMtlPipelineState *, GrMtlOpsRenderPass *opsRenderPass)
void addGrBuffer(sk_sp< const GrBuffer > buffer)
void addGrSurface(sk_sp< const GrSurface > surface)
GrMtlAttachment * stencilAttachment()
GrMtlAttachment * colorAttachment()
GrMtlAttachment * resolveAttachment()
GrMtlRenderCommandEncoder * loadMSAAFromResolve(GrAttachment *dst, GrMtlAttachment *src, const SkIRect &srcRect, MTLRenderPassStencilAttachmentDescriptor *)
Definition: GrMtlGpu.mm:1660
GrMtlCommandBuffer * commandBuffer()
Definition: GrMtlGpu.mm:187
const GrMtlCaps & mtlCaps() const
Definition: GrMtlGpu.h:47
GrMtlResourceProvider & resourceProvider()
Definition: GrMtlGpu.h:51
void submitIndirectCommandBuffer(GrSurface *surface, GrSurfaceOrigin origin, const SkIRect *bounds)
Definition: GrMtlGpu.h:113
~GrMtlOpsRenderPass() override
GrMtlOpsRenderPass(GrMtlGpu *gpu, GrRenderTarget *rt, sk_sp< GrMtlFramebuffer >, GrSurfaceOrigin origin, const GrOpsRenderPass::LoadAndStoreInfo &colorInfo, const GrOpsRenderPass::StencilLoadAndStoreInfo &stencilInfo)
void inlineUpload(GrOpFlushState *state, GrDeferredTextureUploadFn &upload) override
void initRenderState(GrMtlRenderCommandEncoder *)
void setDrawState(GrMtlRenderCommandEncoder *, const skgpu::Swizzle &writeSwizzle, const GrXferProcessor &)
static void SetDynamicScissorRectState(GrMtlRenderCommandEncoder *renderCmdEncoder, SkISize colorAttachmentDimensions, GrSurfaceOrigin rtOrigin, SkIRect scissorRect)
void bindTextures(GrMtlRenderCommandEncoder *renderCmdEncoder)
void setTextures(const GrGeometryProcessor &, const GrPipeline &, const GrSurfaceProxy *const geomProcTextures[])
void setData(GrMtlFramebuffer *, const GrProgramInfo &)
const sk_sp< GrMtlRenderPipeline > & pipeline() const
void drawPrimitives(MTLPrimitiveType primitiveType, NSUInteger vertexStart, NSUInteger vertexCount)
void pushDebugGroup(NSString *string)
void drawIndexedPrimitives(MTLPrimitiveType primitiveType, NSUInteger indexCount, MTLIndexType indexType, id< MTLBuffer > indexBuffer, NSUInteger indexBufferOffset)
void setTriangleFillMode(MTLTriangleFillMode fillMode)
void setRenderPipelineState(id< MTLRenderPipelineState > pso)
id< MTLRenderPipelineState > mtlPipelineState() const
Definition: GrMtlPipeline.h:36
GrMtlPipelineState * findOrCreateCompatiblePipelineState(const GrProgramDesc &, const GrProgramInfo &, GrThreadSafePipelineBuilder::Stats::ProgramCacheResult *stat=nullptr)
sk_sp< const GrBuffer > fActiveIndexBuffer
sk_sp< const GrBuffer > fActiveInstanceBuffer
sk_sp< const GrBuffer > fActiveVertexBuffer
GrSurfaceOrigin fOrigin
GrRenderTarget * fRenderTarget
const skgpu::Swizzle & writeSwizzle() const
Definition: GrPipeline.h:197
bool isScissorTestEnabled() const
Definition: GrPipeline.h:163
bool isWireframe() const
Definition: GrPipeline.h:170
const GrXferProcessor & getXferProcessor() const
Definition: GrPipeline.h:116
bool isValid() const
Definition: GrProgramDesc.h:31
GrPrimitiveType primitiveType() const
Definition: GrProgramInfo.h:42
const GrPipeline & pipeline() const
Definition: GrProgramInfo.h:39
const GrGeometryProcessor & geomProc() const
Definition: GrProgramInfo.h:40
bool enabled() const
virtual GrBackendFormat backendFormat() const =0
SkISize dimensions() const
Definition: GrSurface.h:27
T * get() const
Definition: SkRefCnt.h:303
DlColor color
AtkStateType state
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not defaults to or::depending on whether ipv6 is specified vm service A custom Dart VM Service port The default is to pick a randomly available open port disable vm Disable the Dart VM Service The Dart VM Service is never available in release mode disable vm service Disable mDNS Dart VM Service publication Bind to the IPv6 localhost address for the Dart VM Service Ignored if vm service host is set endless trace buffer
Definition: switches.h:126
Definition: ref_ptr.h:256
Definition: upload.py:1
SeparatedVector2 offset
static SkIRect MakeIRectRelativeTo(GrSurfaceOrigin origin, int rtHeight, SkIRect devRect)
Definition: GrNativeRect.h:31
std::array< float, 4 > fClearColor
Definition: SkRect.h:32
static constexpr SkIRect MakeSize(const SkISize &size)
Definition: SkRect.h:66
static constexpr SkIRect MakeWH(int32_t w, int32_t h)
Definition: SkRect.h:56
Definition: SkSize.h:16
constexpr int32_t width() const
Definition: SkSize.h:36
constexpr int32_t height() const
Definition: SkSize.h:37
void roundOut(SkIRect *dst) const
Definition: SkRect.h:1241
void join(const SkRect &r)
Definition: SkRect.cpp:126
static constexpr SkRect MakeWH(float w, float h)
Definition: SkRect.h:609
void setEmpty()
Definition: SkRect.h:842