Flutter Engine
GrMtlGpu.mm
/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/base/SkMathPriv.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/DataUtils.h"

#import <simd/simd.h>

using namespace skia_private;

#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif

GR_NORETAIN_BEGIN

#if defined(GR_TEST_UTILS)
// set to 1 if you want to do GPU capture of each commandBuffer
#define GR_METAL_CAPTURE_COMMANDBUFFER 0
#endif

std::unique_ptr<GrGpu> GrMtlGpu::Make(const GrMtlBackendContext& context,
                                      const GrContextOptions& options,
                                      GrDirectContext* direct) {
    if (!context.fDevice || !context.fQueue) {
        return nullptr;
    }
    if (@available(macOS 10.14, iOS 10.0, tvOS 10.0, *)) {
        // no warning needed
    } else {
        SkDebugf("*** Error ***: Skia's Metal backend no longer supports this OS version.\n");
#ifdef SK_BUILD_FOR_IOS
        SkDebugf("Minimum supported version is iOS 10.0.\n");
#else
        SkDebugf("Minimum supported version is MacOS 10.14.\n");
#endif
        return nullptr;
    }

    id<MTLDevice> GR_NORETAIN device = (__bridge id<MTLDevice>)(context.fDevice.get());
    id<MTLCommandQueue> GR_NORETAIN queue = (__bridge id<MTLCommandQueue>)(context.fQueue.get());

    return std::unique_ptr<GrGpu>(new GrMtlGpu(direct,
                                               options,
                                               device,
                                               queue));
}

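// Usage sketch (illustrative, not part of this file): a client typically wraps an
// existing MTLDevice and MTLCommandQueue in a GrMtlBackendContext and asks Skia for
// a GrDirectContext; that path ends up in GrMtlGpu::Make() above. The exact factory
// name (GrDirectContexts::MakeMetal vs. the older GrDirectContext::MakeMetal)
// depends on the Skia revision in use.
//
//   GrMtlBackendContext backendContext = {};
//   backendContext.fDevice.retain((__bridge GrMTLHandle)device);
//   backendContext.fQueue.retain((__bridge GrMTLHandle)queue);
//   sk_sp<GrDirectContext> ctx = GrDirectContexts::MakeMetal(backendContext);
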
// This constant determines how many OutstandingCommandBuffers are allocated together as a block in
// the deque. As such it needs to balance allocating too much memory vs. incurring
// allocation/deallocation thrashing. It should roughly correspond to the max number of outstanding
// command buffers we expect to see.
static const int kDefaultOutstandingAllocCnt = 8;

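// The lifecycle pattern this deque supports, in miniature (see submitCommandBuffer()
// and checkForFinishedCommandBuffers() below): entries are placement-new'd into the
// deque's storage and must be destroyed manually before their slot is recycled.
//
//   new (fOutstandingCommandBuffers.push_back()) OutstandingCommandBuffer(cmdBuffer);
//   ...
//   auto* front = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
//   fOutstandingCommandBuffers.pop_front();  // remove first...
//   front->~OutstandingCommandBuffer();      // ...then run the destructor by hand
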
GrMtlGpu::GrMtlGpu(GrDirectContext* direct, const GrContextOptions& options,
                   id<MTLDevice> device, id<MTLCommandQueue> queue)
        : INHERITED(direct)
        , fDevice(device)
        , fQueue(queue)
        , fOutstandingCommandBuffers(sizeof(OutstandingCommandBuffer), kDefaultOutstandingAllocCnt)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fUniformsRingBuffer(this, 128 * 1024, 256, GrGpuBufferType::kUniform)
        , fDisconnected(false) {
    fMtlCaps.reset(new GrMtlCaps(options, fDevice));
    this->initCaps(fMtlCaps);
#if GR_METAL_CAPTURE_COMMANDBUFFER
    this->testingOnly_startCapture();
#endif
    fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
}

GrMtlGpu::~GrMtlGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
}

void GrMtlGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);

    if (!fDisconnected) {
        this->destroyResources();
        fDisconnected = true;
    }
}

GrThreadSafePipelineBuilder* GrMtlGpu::pipelineBuilder() {
    return nullptr;
}

sk_sp<GrThreadSafePipelineBuilder> GrMtlGpu::refPipelineBuilder() {
    return nullptr;
}

void GrMtlGpu::destroyResources() {
    this->submitCommandBuffer(SyncQueue::kForce_SyncQueue);
    // if there's no work we won't release the command buffer, so we do it here
    fCurrentCmdBuffer = nil;

    // We used a placement new for each object in fOutstandingCommandBuffers, so we're responsible
    // for calling the destructor on each of them as well.
    while (!fOutstandingCommandBuffers.empty()) {
        OutstandingCommandBuffer* buffer =
                (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
        // make sure we remove before deleting as deletion might try to kick off another submit
        fOutstandingCommandBuffers.pop_front();
        buffer->~OutstandingCommandBuffer();
    }

    fStagingBufferManager.reset();

    fResourceProvider.destroyResources();

    fQueue = nil;
    fDevice = nil;
}

GrOpsRenderPass* GrMtlGpu::onGetOpsRenderPass(
        GrRenderTarget* renderTarget, bool useMSAASurface, GrAttachment* stencil,
        GrSurfaceOrigin origin, const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const TArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    // For the given render target and requested render pass features we need to find a compatible
    // framebuffer to use.
    GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(renderTarget);

    // TODO: support DMSAA
    SkASSERT(!useMSAASurface ||
             (renderTarget->numSamples() > 1));

    bool withResolve = false;

    // Figure out if we can use a Resolve store action for this render pass. When we set up
    // the render pass we'll update the color load/store ops since we don't want to ever load
    // or store the msaa color attachment, but may need to for the resolve attachment.
    if (useMSAASurface && this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
        withResolve = true;
    }

    sk_sp<GrMtlFramebuffer> framebuffer =
            sk_ref_sp(mtlRT->getFramebuffer(withResolve, SkToBool(stencil)));
    if (!framebuffer) {
        return nullptr;
    }

    return new GrMtlOpsRenderPass(this, renderTarget, std::move(framebuffer), origin, colorInfo,
                                  stencilInfo);
}

GrMtlCommandBuffer* GrMtlGpu::commandBuffer() {
    if (!fCurrentCmdBuffer) {
#if GR_METAL_CAPTURE_COMMANDBUFFER
        this->testingOnly_startCapture();
#endif
        // Create a new command buffer for the next submit
        fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
    }

    SkASSERT(fCurrentCmdBuffer);
    return fCurrentCmdBuffer.get();
}

void GrMtlGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
    SkASSERT(buffer);
    this->commandBuffer()->addGrBuffer(std::move(buffer));
}

void GrMtlGpu::submit(GrOpsRenderPass* renderPass) {
    GrMtlOpsRenderPass* mtlRenderPass = reinterpret_cast<GrMtlOpsRenderPass*>(renderPass);
    mtlRenderPass->submit();
    delete renderPass;
}

bool GrMtlGpu::submitCommandBuffer(SyncQueue sync) {
    if (!fCurrentCmdBuffer || !fCurrentCmdBuffer->hasWork()) {
        if (sync == SyncQueue::kForce_SyncQueue) {
            this->finishOutstandingGpuWork();
            this->checkForFinishedCommandBuffers();
        }
        // We need to manually call the finishedCallbacks since we don't add this
        // to the OutstandingCommandBuffer list
        if (fCurrentCmdBuffer) {
            fCurrentCmdBuffer->callFinishedCallbacks();
        }
        return true;
    }

    SkASSERT(fCurrentCmdBuffer);
    bool didCommit = fCurrentCmdBuffer->commit(sync == SyncQueue::kForce_SyncQueue);
    if (didCommit) {
        new (fOutstandingCommandBuffers.push_back()) OutstandingCommandBuffer(fCurrentCmdBuffer);
    }

    // We don't create a new command buffer here because we may end up using it
    // in the next frame, and that confuses the GPU debugger. Instead we
    // create when we next need one.
    fCurrentCmdBuffer.reset();

    // If the freeing of any resources held by a finished command buffer causes us to send
    // a new command to the gpu we'll create the new command buffer in commandBuffer(), above.
    this->checkForFinishedCommandBuffers();

#if GR_METAL_CAPTURE_COMMANDBUFFER
    this->testingOnly_stopCapture();
#endif
    return didCommit;
}

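// Flush lifecycle, summarized: callers record into the buffer returned by
// commandBuffer(); submitCommandBuffer() then commits it, tracks it in
// fOutstandingCommandBuffers for completion polling, and defers creating the next
// command buffer until commandBuffer() is next called.
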
void GrMtlGpu::checkForFinishedCommandBuffers() {
    // Iterate over all the outstanding command buffers to see if any have finished. The command
    // buffers are in order from oldest to newest, so we start at the front to check if their fence
    // has signaled. If so we pop it off and move onto the next.
    // Repeat till we find a command list that has not finished yet (and all others afterwards are
    // also guaranteed to not have finished).
    OutstandingCommandBuffer* front = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
    while (front && (*front)->isCompleted()) {
        // Make sure we remove before deleting as deletion might try to kick off another submit
        fOutstandingCommandBuffers.pop_front();
        // Since we used placement new we are responsible for calling the destructor manually.
        front->~OutstandingCommandBuffer();
        front = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
    }
}

void GrMtlGpu::finishOutstandingGpuWork() {
    // wait for the last command buffer we've submitted to finish
    OutstandingCommandBuffer* back =
            (OutstandingCommandBuffer*)fOutstandingCommandBuffers.back();
    if (back) {
        (*back)->waitUntilCompleted();
    }
}

void GrMtlGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
                               GrGpuFinishedContext finishedContext) {
    SkASSERT(finishedProc);
    this->addFinishedCallback(skgpu::RefCntedCallback::Make(finishedProc, finishedContext));
}

void GrMtlGpu::addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    SkASSERT(finishedCallback);
    // Besides the current commandbuffer, we also add the finishedCallback to the newest outstanding
    // commandbuffer. Our contract for calling the proc is that all previously submitted cmdbuffers
    // have finished when we call it. However, if our current command buffer has no work when it is
    // flushed it will drop its ref to the callback immediately. But the previous work may not have
    // finished. It is safe to only add the proc to the newest outstanding commandbuffer because
    // that must finish after all previously submitted command buffers.
    OutstandingCommandBuffer* back = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.back();
    if (back) {
        (*back)->addFinishedCallback(finishedCallback);
    }
    commandBuffer()->addFinishedCallback(std::move(finishedCallback));
}

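// Caller-side sketch (hypothetical client code): per the contract above, the proc
// fires only after all previously submitted command buffers have completed.
//
//   static void onGpuWorkDone(GrGpuFinishedContext ctx) {
//       // safe to reclaim CPU-side resources tied to the flushed work
//   }
//   gpu->addFinishedProc(onGpuWorkDone, /*finishedContext=*/myCtx);
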
bool GrMtlGpu::onSubmitToGpu(GrSyncCpu sync) {
    if (sync == GrSyncCpu::kYes) {
        return this->submitCommandBuffer(kForce_SyncQueue);
    } else {
        return this->submitCommandBuffer(kSkip_SyncQueue);
    }
}

std::unique_ptr<GrSemaphore> GrMtlGpu::prepareTextureForCrossContextUsage(GrTexture*) {
    this->submitToGpu(GrSyncCpu::kNo);
    return nullptr;
}

sk_sp<GrGpuBuffer> GrMtlGpu::onCreateBuffer(size_t size,
                                            GrGpuBufferType type,
                                            GrAccessPattern accessPattern) {
    return GrMtlBuffer::Make(this, size, type, accessPattern);
}

static bool check_max_blit_width(int widthInPixels) {
    if (widthInPixels > 32767) {
        SkASSERT(false); // surfaces should not be this wide anyway
        return false;
    }
    return true;
}

bool GrMtlGpu::uploadToTexture(GrMtlTexture* tex,
                               SkIRect rect,
                               GrColorType dataColorType,
                               const GrMipLevel texels[],
                               int mipLevelCount) {
    SkASSERT(this->mtlCaps().isFormatTexturable(tex->mtlTexture().pixelFormat));
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(tex->dimensions()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(mipLevelCount == 1 || mipLevelCount == (tex->maxMipmapLevel() + 1));

    if (!check_max_blit_width(rect.width())) {
        return false;
    }
    if (rect.isEmpty()) {
        return false;
    }

    SkASSERT(this->mtlCaps().surfaceSupportsWritePixels(tex));
    SkASSERT(this->mtlCaps().areColorTypeAndFormatCompatible(dataColorType, tex->backendFormat()));

    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);
    // Either upload only the first miplevel or all miplevels
    SkASSERT(1 == mipLevelCount || mipLevelCount == (int)mtlTexture.mipmapLevelCount);

    if (mipLevelCount == 1 && !texels[0].fPixels) {
        return true; // no data to upload
    }

    for (int i = 0; i < mipLevelCount; ++i) {
        // We do not allow any gaps in the mip data
        if (!texels[i].fPixels) {
            return false;
        }
    }

    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    TArray<size_t> individualMipOffsets(mipLevelCount);
    size_t combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
                                                                 rect.size(),
                                                                 &individualMipOffsets,
                                                                 mipLevelCount);
    SkASSERT(combinedBufferSize);

    // offset value must be a multiple of the destination texture's pixel size in bytes
    size_t alignment = std::max(bpp, this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* bufferData = (char*)slice.fOffsetMapPtr;
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    int currentWidth = rect.width();
    int currentHeight = rect.height();
    SkDEBUGCODE(int layerHeight = tex->height());
    MTLOrigin origin = MTLOriginMake(rect.left(), rect.top(), 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"uploadToTexture"];
#endif
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texels[currentMipLevel].fPixels) {
            SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texels[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping any trailing bytes
            char* dst = bufferData + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texels[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                              sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                         sourceBytesPerRow: trimRowBytes
                       sourceBytesPerImage: trimRowBytes*currentHeight
                                sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
                                 toTexture: mtlTexture
                          destinationSlice: 0
                          destinationLevel: currentMipLevel
                         destinationOrigin: origin];
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
        SkDEBUGCODE(layerHeight = currentHeight);
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, combinedBufferSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    if (mipLevelCount < (int) tex->mtlTexture().mipmapLevelCount) {
        tex->markMipmapsDirty();
    }

    return true;
}

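// Worked example (illustrative): consider the first two levels of a mip chain whose
// base is 16x16 kRGBA_8888 (bpp = 4). Level 0 packs 16 rows of 16*4 = 64 tight bytes
// (1024 total) and level 1 packs 8 rows of 8*4 = 32 bytes (256 total), so
// individualMipOffsets begins {0, 1024, ...} (plus any alignment padding between
// levels).
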
bool GrMtlGpu::clearTexture(GrMtlTexture* tex, size_t bpp, uint32_t levelMask) {
    SkASSERT(this->mtlCaps().isFormatTexturable(tex->mtlTexture().pixelFormat));

    if (!levelMask) {
        return true;
    }

    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);
    // Either upload only the first miplevel or all miplevels
    int mipLevelCount = (int)mtlTexture.mipmapLevelCount;

    TArray<size_t> individualMipOffsets(mipLevelCount);
    size_t combinedBufferSize = 0;
    int currentWidth = tex->width();
    int currentHeight = tex->height();

    // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
    // config. This works with the assumption that the bytes in pixel config is always a power of 2.
    // TODO: can we just copy from a single buffer the size of the largest cleared level w/o a perf
    // penalty?
    SkASSERT((bpp & (bpp - 1)) == 0);
    const size_t alignmentMask = 0x3 | (bpp - 1);
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (levelMask & (1 << currentMipLevel)) {
            const size_t trimmedSize = currentWidth * bpp * currentHeight;
            const size_t alignmentDiff = combinedBufferSize & alignmentMask;
            if (alignmentDiff != 0) {
                combinedBufferSize += alignmentMask - alignmentDiff + 1;
            }
            individualMipOffsets.push_back(combinedBufferSize);
            combinedBufferSize += trimmedSize;
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }
    SkASSERT(combinedBufferSize > 0 && !individualMipOffsets.empty());

    size_t alignment = std::max(bpp, this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
    id<MTLBuffer> transferBuffer = mtlBuffer->mtlBuffer();

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"clearTexture"];
#endif
    // clear the buffer to transparent black
    NSRange clearRange;
    clearRange.location = 0;
    clearRange.length = combinedBufferSize;
    [blitCmdEncoder fillBuffer: transferBuffer
                         range: clearRange
                         value: 0];

    // now copy buffer to texture
    currentWidth = tex->width();
    currentHeight = tex->height();
    MTLOrigin origin = MTLOriginMake(0, 0, 0);
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (levelMask & (1 << currentMipLevel)) {
            const size_t rowBytes = currentWidth * bpp;

            [blitCmdEncoder copyFromBuffer: transferBuffer
                              sourceOffset: individualMipOffsets[currentMipLevel]
                         sourceBytesPerRow: rowBytes
                       sourceBytesPerImage: rowBytes * currentHeight
                                sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
                                 toTexture: mtlTexture
                          destinationSlice: 0
                          destinationLevel: currentMipLevel
                         destinationOrigin: origin];
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }
    // Don't need didModifyRange: here because fillBuffer: happens on the GPU
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    if (mipLevelCount < (int) tex->mtlTexture().mipmapLevelCount) {
        tex->markMipmapsDirty();
    }

    return true;
}

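// Note on the alignment mask above (illustrative): for bpp = 2 the mask is
// 0x3 | 0x1 = 0x3, so each mip offset is rounded up to a 4-byte boundary; for
// bpp = 8 the mask is 0x7, giving 8-byte alignment. Both satisfy "at least 4 bytes
// and a multiple of the bytes per pixel".
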
sk_sp<GrAttachment> GrMtlGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                    SkISize dimensions, int numStencilSamples) {
    MTLPixelFormat sFmt = this->mtlCaps().preferredStencilFormat();

    fStats.incStencilAttachmentCreates();
    return GrMtlAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
}

sk_sp<GrAttachment> GrMtlGpu::makeMSAAAttachment(SkISize dimensions,
                                                 const GrBackendFormat& format,
                                                 int numSamples,
                                                 GrProtected isProtected,
                                                 GrMemoryless isMemoryless) {
    // Metal doesn't support protected textures
    SkASSERT(isProtected == GrProtected::kNo);
    // TODO: add memoryless support
    SkASSERT(isMemoryless == GrMemoryless::kNo);

    MTLPixelFormat pixelFormat = (MTLPixelFormat)GrBackendFormats::AsMtlFormat(format);
    SkASSERT(pixelFormat != MTLPixelFormatInvalid);
    SkASSERT(this->mtlCaps().isFormatRenderable(pixelFormat, numSamples));

    fStats.incMSAAAttachmentCreates();
    return GrMtlAttachment::MakeMSAA(this, dimensions, numSamples, pixelFormat);
}

sk_sp<GrTexture> GrMtlGpu::onCreateTexture(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           GrRenderable renderable,
                                           int renderTargetSampleCnt,
                                           skgpu::Budgeted budgeted,
                                           GrProtected isProtected,
                                           int mipLevelCount,
                                           uint32_t levelClearMask,
                                           std::string_view label) {
    // We don't support protected textures in Metal.
    if (isProtected == GrProtected::kYes) {
        return nullptr;
    }
    SkASSERT(mipLevelCount > 0);

    MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
    SkASSERT(mtlPixelFormat != MTLPixelFormatInvalid);
    SkASSERT(!this->caps()->isFormatCompressed(format));

    sk_sp<GrMtlTexture> tex;
    GrMipmapStatus mipmapStatus =
            mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
    if (renderable == GrRenderable::kYes) {
        tex = GrMtlTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, renderTargetSampleCnt, mtlPixelFormat, mipLevelCount,
                mipmapStatus, label);
    } else {
        tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, mtlPixelFormat,
                                           mipLevelCount, mipmapStatus, label);
    }

    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        this->clearTexture(tex.get(),
                           skgpu::MtlFormatBytesPerBlock(mtlPixelFormat),
                           levelClearMask);
    }

    return std::move(tex);
}

sk_sp<GrTexture> GrMtlGpu::onCreateCompressedTexture(SkISize dimensions,
                                                     const GrBackendFormat& format,
                                                     skgpu::Budgeted budgeted,
                                                     skgpu::Mipmapped mipmapped,
                                                     GrProtected isProtected,
                                                     const void* data,
                                                     size_t dataSize) {
    // We don't support protected textures in Metal.
    if (isProtected == GrProtected::kYes) {
        return nullptr;
    }

    SkASSERT(this->caps()->isFormatTexturable(format, GrTextureType::k2D));
    SkASSERT(data);

    if (!check_max_blit_width(dimensions.width())) {
        return nullptr;
    }

    MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
    SkASSERT(this->caps()->isFormatCompressed(format));

    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    GrMipmapStatus mipmapStatus = (mipmapped == skgpu::Mipmapped::kYes)
                                          ? GrMipmapStatus::kValid
                                          : GrMipmapStatus::kNotAllocated;

    auto tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, mtlPixelFormat,
                                            numMipLevels, mipmapStatus,
                                            /*label=*/"MtlGpu_CreateCompressedTexture");
    if (!tex) {
        return nullptr;
    }

    // Upload to texture
    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);

    auto compressionType = GrBackendFormatToCompressionType(format);
    SkASSERT(compressionType != SkTextureCompressionType::kNone);

    TArray<size_t> individualMipOffsets(numMipLevels);
    SkDEBUGCODE(size_t combinedBufferSize =)
            SkCompressedDataSize(compressionType,
                                 dimensions,
                                 &individualMipOffsets,
                                 mipmapped == skgpu::Mipmapped::kYes);
    SkASSERT(individualMipOffsets.size() == numMipLevels);
    SkASSERT(dataSize == combinedBufferSize);

    // offset value must be a multiple of the destination texture's pixel size in bytes
    // for compressed textures, this is the block size
    size_t alignment = SkCompressedBlockSize(compressionType);
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            dataSize, alignment);
    if (!slice.fBuffer) {
        return nullptr;
    }
    char* bufferData = (char*)slice.fOffsetMapPtr;
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return nullptr;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onCreateCompressedTexture"];
#endif

    // copy data into the buffer, skipping any trailing bytes
    memcpy(bufferData, data, dataSize);

    SkISize levelDimensions = dimensions;
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        const size_t levelRowBytes = skgpu::CompressedRowBytes(compressionType,
                                                               levelDimensions.width());
        size_t levelSize = SkCompressedDataSize(compressionType, levelDimensions, nullptr, false);

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(), 1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() /2),
                           std::max(1, levelDimensions.height()/2)};
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, dataSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    return std::move(tex);
}

// TODO: Extra retain/release can't be avoided here because of GetMtlTextureInfo copying the
// sk_cfp. It would be useful to have a (possibly-internal-only?) API to get the raw pointer.
static id<MTLTexture> get_texture_from_backend(const GrBackendTexture& backendTex) {
    GrMtlTextureInfo textureInfo;
    if (!GrBackendTextures::GetMtlTextureInfo(backendTex, &textureInfo)) {
        return nil;
    }
    return GrGetMTLTexture(textureInfo.fTexture.get());
}

static id<MTLTexture> get_texture_from_backend(const GrBackendRenderTarget& backendRT) {
    GrMtlTextureInfo textureInfo;
    if (!GrBackendRenderTargets::GetMtlTextureInfo(backendRT, &textureInfo)) {
        return nil;
    }
    return GrGetMTLTexture(textureInfo.fTexture.get());
}

sk_sp<GrTexture> GrMtlGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                                GrWrapOwnership,
                                                GrWrapCacheable cacheable,
                                                GrIOType ioType) {
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from a MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), mtlTexture, cacheable,
                                            ioType);
}

sk_sp<GrTexture> GrMtlGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                          GrWrapOwnership,
                                                          GrWrapCacheable cacheable) {
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from a MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), mtlTexture, cacheable,
                                            kRead_GrIOType);
}

sk_sp<GrTexture> GrMtlGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                          int sampleCnt,
                                                          GrWrapOwnership,
                                                          GrWrapCacheable cacheable) {
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from a MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    const GrMtlCaps& caps = this->mtlCaps();

    MTLPixelFormat format = mtlTexture.pixelFormat;
    if (!caps.isFormatRenderable(format, sampleCnt)) {
        return nullptr;
    }

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
    }

    sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, format);
    SkASSERT(sampleCnt);

    return GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(
            this, backendTex.dimensions(), sampleCnt, mtlTexture, cacheable);
}

sk_sp<GrRenderTarget> GrMtlGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    if (!this->caps()->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    id<MTLTexture> mtlTexture = get_texture_from_backend(backendRT);
    if (!mtlTexture) {
        return nullptr;
    }

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
    }

    return GrMtlRenderTarget::MakeWrappedRenderTarget(this, backendRT.dimensions(),
                                                      backendRT.sampleCnt(), mtlTexture);
}

bool GrMtlGpu::onRegenerateMipMapLevels(GrTexture* texture) {
    GrMtlTexture* grMtlTexture = static_cast<GrMtlTexture*>(texture);
    id<MTLTexture> GR_NORETAIN mtlTexture = grMtlTexture->mtlTexture();

    // Automatic mipmap generation is only supported by color-renderable formats
    if (!fMtlCaps->isFormatRenderable(mtlTexture.pixelFormat, 1) &&
        // We have pixel configs marked as texturable-only that use RGBA8 as the internal format
        MTLPixelFormatRGBA8Unorm != mtlTexture.pixelFormat) {
        return false;
    }

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
    [blitCmdEncoder generateMipmapsForTexture: mtlTexture];
    this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(grMtlTexture->attachment()));

    return true;
}

// Used to "clear" a backend texture to a constant color by transferring.
static GrColorType mtl_format_to_backend_tex_clear_colortype(MTLPixelFormat format) {
    switch(format) {
        case MTLPixelFormatA8Unorm:         return GrColorType::kAlpha_8;
        case MTLPixelFormatR8Unorm:         return GrColorType::kR_8;
        case MTLPixelFormatB5G6R5Unorm:     return GrColorType::kBGR_565;
        case MTLPixelFormatABGR4Unorm:      return GrColorType::kABGR_4444;
        case MTLPixelFormatRGBA8Unorm:      return GrColorType::kRGBA_8888;
        case MTLPixelFormatRGBA8Unorm_sRGB: return GrColorType::kRGBA_8888_SRGB;

        case MTLPixelFormatRG8Unorm:        return GrColorType::kRG_88;
        case MTLPixelFormatBGRA8Unorm:      return GrColorType::kBGRA_8888;
        case MTLPixelFormatRGB10A2Unorm:    return GrColorType::kRGBA_1010102;
        case MTLPixelFormatBGR10A2Unorm:    return GrColorType::kBGRA_1010102;
        case MTLPixelFormatR16Float:        return GrColorType::kR_F16;
        case MTLPixelFormatRGBA16Float:     return GrColorType::kRGBA_F16;
        case MTLPixelFormatR16Unorm:        return GrColorType::kR_16;
        case MTLPixelFormatRG16Unorm:       return GrColorType::kRG_1616;
        case MTLPixelFormatRGBA16Unorm:     return GrColorType::kRGBA_16161616;
        case MTLPixelFormatRG16Float:       return GrColorType::kRG_F16;
        default:                            return GrColorType::kUnknown;
    }

    SkUNREACHABLE;
}

static void copy_src_data(char* dst,
                          size_t bytesPerPixel,
                          const TArray<size_t>& individualMipOffsets,
                          const GrPixmap srcData[],
                          int numMipLevels,
                          size_t bufferSize) {
    SkASSERT(srcData && numMipLevels);
    SkASSERT(individualMipOffsets.size() == numMipLevels);

    for (int level = 0; level < numMipLevels; ++level) {
        const size_t trimRB = srcData[level].width() * bytesPerPixel;
        SkASSERT(individualMipOffsets[level] + trimRB * srcData[level].height() <= bufferSize);
        SkRectMemcpy(dst + individualMipOffsets[level], trimRB,
                     srcData[level].addr(), srcData[level].rowBytes(),
                     trimRB, srcData[level].height());
    }
}

bool GrMtlGpu::createMtlTextureForBackendSurface(MTLPixelFormat mtlFormat,
                                                 SkISize dimensions,
                                                 int sampleCnt,
                                                 GrTexturable texturable,
                                                 GrRenderable renderable,
                                                 skgpu::Mipmapped mipmapped,
                                                 GrMtlTextureInfo* info) {
    SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);

    if (texturable == GrTexturable::kYes && !fMtlCaps->isFormatTexturable(mtlFormat)) {
        return false;
    }
    if (renderable == GrRenderable::kYes && !fMtlCaps->isFormatRenderable(mtlFormat, 1)) {
        return false;
    }

    if (!check_max_blit_width(dimensions.width())) {
        return false;
    }

    auto desc = [[MTLTextureDescriptor alloc] init];
    desc.pixelFormat = mtlFormat;
    desc.width = dimensions.width();
    desc.height = dimensions.height();
    if (mipmapped == skgpu::Mipmapped::kYes) {
        desc.mipmapLevelCount = 1 + SkPrevLog2(std::max(dimensions.width(), dimensions.height()));
    }
    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        desc.storageMode = MTLStorageModePrivate;
        MTLTextureUsage usage = texturable == GrTexturable::kYes ? MTLTextureUsageShaderRead : 0;
        usage |= renderable == GrRenderable::kYes ? MTLTextureUsageRenderTarget : 0;
        desc.usage = usage;
    }
    if (sampleCnt != 1) {
        desc.sampleCount = sampleCnt;
        desc.textureType = MTLTextureType2DMultisample;
    }
    id<MTLTexture> testTexture = [fDevice newTextureWithDescriptor: desc];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    testTexture.label = @"testTexture";
#endif
    info->fTexture.reset(GrRetainPtrFromId(testTexture));
    return true;
}

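// For example (illustrative), a mipmapped 100x50 request yields
// desc.mipmapLevelCount = 1 + SkPrevLog2(100) = 1 + 6 = 7, matching the chain
// 100 -> 50 -> 25 -> 12 -> 6 -> 3 -> 1.
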
GrBackendTexture GrMtlGpu::onCreateBackendTexture(SkISize dimensions,
                                                  const GrBackendFormat& format,
                                                  GrRenderable renderable,
                                                  skgpu::Mipmapped mipmapped,
                                                  GrProtected isProtected,
                                                  std::string_view label) {
    const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);

    GrMtlTextureInfo info;
    if (!this->createMtlTextureForBackendSurface(mtlFormat, dimensions, 1, GrTexturable::kYes,
                                                 renderable, mipmapped, &info)) {
        return {};
    }

    return GrBackendTextures::MakeMtl(dimensions.width(), dimensions.height(), mipmapped, info);
}

bool GrMtlGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
                                     sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                     std::array<float, 4> color) {
    GrMtlTextureInfo info;
    SkAssertResult(GrBackendTextures::GetMtlTextureInfo(backendTexture, &info));

    id<MTLTexture> GR_NORETAIN mtlTexture = GrGetMTLTexture(info.fTexture.get());

    const MTLPixelFormat mtlFormat = mtlTexture.pixelFormat;

    // Create a transfer buffer and fill with data.
    size_t bytesPerPixel = skgpu::MtlFormatBytesPerBlock(mtlFormat);
    size_t combinedBufferSize;

    // Reuse the same buffer for all levels. Should be ok since we made the row bytes tight.
    combinedBufferSize = bytesPerPixel*backendTexture.width()*backendTexture.height();

    size_t alignment = std::max(bytesPerPixel, this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* buffer = (char*)slice.fOffsetMapPtr;

    GrColorType colorType = mtl_format_to_backend_tex_clear_colortype(mtlFormat);
    if (colorType == GrColorType::kUnknown) {
        return false;
    }
    GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
    auto rb = ii.minRowBytes();
    SkASSERT(rb == bytesPerPixel*backendTexture.width());
    if (!GrClearImage(ii, buffer, rb, color)) {
        return false;
    }

    // Transfer buffer contents to texture
    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onClearBackendTexture"];
#endif
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    SkISize levelDimensions(backendTexture.dimensions());
    int numMipLevels = mtlTexture.mipmapLevelCount;
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        size_t levelRowBytes;
        size_t levelSize;

        levelRowBytes = levelDimensions.width() * bytesPerPixel;
        levelSize = levelRowBytes * levelDimensions.height();

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(),
                                                    1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() / 2),
                           std::max(1, levelDimensions.height() / 2)};
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, combinedBufferSize)];
    }
#endif
    [blitCmdEncoder popDebugGroup];

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }

    return true;
}

GrBackendTexture GrMtlGpu::onCreateCompressedBackendTexture(SkISize dimensions,
                                                            const GrBackendFormat& format,
                                                            skgpu::Mipmapped mipmapped,
                                                            GrProtected isProtected) {
    const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);

    GrMtlTextureInfo info;
    if (!this->createMtlTextureForBackendSurface(mtlFormat, dimensions, 1, GrTexturable::kYes,
                                                 GrRenderable::kNo, mipmapped, &info)) {
        return {};
    }

    return GrBackendTextures::MakeMtl(dimensions.width(), dimensions.height(), mipmapped, info);
}

bool GrMtlGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                                sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                                const void* data,
                                                size_t size) {
    GrMtlTextureInfo info;
    SkAssertResult(GrBackendTextures::GetMtlTextureInfo(backendTexture, &info));

    id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());

    int numMipLevels = mtlTexture.mipmapLevelCount;
    skgpu::Mipmapped mipmapped = numMipLevels > 1 ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo;

    SkTextureCompressionType compression =
            GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
    SkASSERT(compression != SkTextureCompressionType::kNone);

    // Create a transfer buffer and fill with data.
    STArray<16, size_t> individualMipOffsets;
    size_t combinedBufferSize;
    combinedBufferSize = SkCompressedDataSize(compression,
                                              backendTexture.dimensions(),
                                              &individualMipOffsets,
                                              mipmapped == skgpu::Mipmapped::kYes);
    SkASSERT(individualMipOffsets.size() == numMipLevels);

    size_t alignment = std::max(SkCompressedBlockSize(compression),
                                this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* buffer = (char*)slice.fOffsetMapPtr;

    memcpy(buffer, data, size);

    // Transfer buffer contents to texture
    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onUpdateCompressedBackendTexture"];
#endif
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    SkISize levelDimensions(backendTexture.dimensions());
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        size_t levelRowBytes;
        size_t levelSize;

        levelRowBytes = skgpu::CompressedRowBytes(compression, levelDimensions.width());
        levelSize = SkCompressedDataSize(compression, levelDimensions, nullptr, false);

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(),
                                                    1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() / 2),
                           std::max(1, levelDimensions.height() / 2)};
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange:NSMakeRange(slice.fOffset, combinedBufferSize)];
    }
#endif
    [blitCmdEncoder popDebugGroup];

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }

    return true;
}

void GrMtlGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    SkASSERT(GrBackendApi::kMetal == tex.backend());
    // Nothing to do here, will get cleaned up when the GrBackendTexture object goes away
}

bool GrMtlGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
    Stats::ProgramCacheResult stat;

    auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
            desc, programInfo, &stat);
    if (!pipelineState) {
        return false;
    }

    return stat != Stats::ProgramCacheResult::kHit;
}

bool GrMtlGpu::precompileShader(const SkData& key, const SkData& data) {
    return this->resourceProvider().precompileShader(key, data);
}

#if defined(GR_TEST_UTILS)
bool GrMtlGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
    SkASSERT(GrBackendApi::kMetal == tex.backend());

    GrMtlTextureInfo info;
    if (!GrBackendTextures::GetMtlTextureInfo(tex, &info)) {
        return false;
    }
    id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());
    if (!mtlTexture) {
        return false;
    }
    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        return mtlTexture.usage & MTLTextureUsageShaderRead;
    } else {
        return true; // best we can do
    }
}

GrBackendRenderTarget GrMtlGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                                     GrColorType ct,
                                                                     int sampleCnt,
                                                                     GrProtected isProtected) {
    if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
        dimensions.height() > this->caps()->maxRenderTargetSize()) {
        return {};
    }
    if (isProtected == GrProtected::kYes) {
        return {};
    }

    MTLPixelFormat format = this->mtlCaps().getFormatFromColorType(ct);
    sampleCnt = this->mtlCaps().getRenderTargetSampleCount(sampleCnt, format);
    if (sampleCnt == 0) {
        return {};
    }

    GrMtlTextureInfo info;
    if (!this->createMtlTextureForBackendSurface(format,
                                                 dimensions,
                                                 sampleCnt,
                                                 GrTexturable::kNo,
                                                 GrRenderable::kYes,
                                                 skgpu::Mipmapped::kNo,
                                                 &info)) {
        return {};
    }

    return GrBackendRenderTargets::MakeMtl(dimensions.width(), dimensions.height(), info);
}

void GrMtlGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    SkASSERT(GrBackendApi::kMetal == rt.backend());

    GrMtlTextureInfo info;
    if (GrBackendRenderTargets::GetMtlTextureInfo(rt, &info)) {
        this->submitToGpu(GrSyncCpu::kYes);
        // Nothing else to do here, will get cleaned up when the GrBackendRenderTarget
        // is deleted.
    }
}
#endif // defined(GR_TEST_UTILS)

void GrMtlGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src) {
    // TODO: Add support for subrectangles
    GrMtlRenderTarget* srcRT = static_cast<GrMtlRenderTarget*>(src->asRenderTarget());
    GrRenderTarget* dstRT = dst->asRenderTarget();
    GrMtlAttachment* dstAttachment;
    if (dstRT) {
        GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
        dstAttachment = mtlRT->colorAttachment();
    } else {
        SkASSERT(dst->asTexture());
        dstAttachment = static_cast<GrMtlTexture*>(dst->asTexture())->attachment();
    }

    this->resolve(dstAttachment, srcRT->colorAttachment());
}

void GrMtlGpu::copySurfaceAsBlit(GrSurface* dst, GrSurface* src,
                                 GrMtlAttachment* dstAttachment, GrMtlAttachment* srcAttachment,
                                 const SkIRect& srcRect, const SkIPoint& dstPoint) {
#ifdef SK_DEBUG
    SkASSERT(this->mtlCaps().canCopyAsBlit(dstAttachment->mtlFormat(), dstAttachment->numSamples(),
                                           srcAttachment->mtlFormat(), dstAttachment->numSamples(),
                                           srcRect, dstPoint, dst == src));
#endif
    id<MTLTexture> GR_NORETAIN dstTex = dstAttachment->mtlTexture();
    id<MTLTexture> GR_NORETAIN srcTex = srcAttachment->mtlTexture();

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"copySurfaceAsBlit"];
#endif
    [blitCmdEncoder copyFromTexture: srcTex
                        sourceSlice: 0
                        sourceLevel: 0
                       sourceOrigin: MTLOriginMake(srcRect.x(), srcRect.y(), 0)
                         sourceSize: MTLSizeMake(srcRect.width(), srcRect.height(), 1)
                          toTexture: dstTex
                   destinationSlice: 0
                   destinationLevel: 0
                  destinationOrigin: MTLOriginMake(dstPoint.fX, dstPoint.fY, 0)];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif
    cmdBuffer->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    cmdBuffer->addGrSurface(sk_ref_sp<const GrSurface>(src));
}

bool GrMtlGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
                             GrSurface* src, const SkIRect& srcRect,
                             GrSamplerState::Filter) {
    SkASSERT(!src->isProtected() && !dst->isProtected());

    if (srcRect.size() != dstRect.size()) {
        return false;
    }

    GrMtlAttachment* dstAttachment;
    GrMtlAttachment* srcAttachment;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
        // This will technically return true for single sample rts that used DMSAA in which case we
        // don't have to pick the resolve attachment. But in that case the resolve and color
        // attachments will be the same anyways.
        if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
            dstAttachment = mtlRT->resolveAttachment();
        } else {
            dstAttachment = mtlRT->colorAttachment();
        }
    } else if (dst->asTexture()) {
        dstAttachment = static_cast<GrMtlTexture*>(dst->asTexture())->attachment();
    } else {
        // The surface is a GrAttachment already
        dstAttachment = static_cast<GrMtlAttachment*>(dst);
    }
    GrRenderTarget* srcRT = src->asRenderTarget();
    if (srcRT) {
        GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(srcRT);
        // This will technically return true for single sample rts that used DMSAA in which case we
        // don't have to pick the resolve attachment. But in that case the resolve and color
        // attachments will be the same anyways.
        if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
            srcAttachment = mtlRT->resolveAttachment();
        } else {
            srcAttachment = mtlRT->colorAttachment();
        }
    } else if (src->asTexture()) {
        SkASSERT(src->asTexture());
        srcAttachment = static_cast<GrMtlTexture*>(src->asTexture())->attachment();
    } else {
        // The surface is a GrAttachment already
        srcAttachment = static_cast<GrMtlAttachment*>(src);
    }

    MTLPixelFormat dstFormat = dstAttachment->mtlFormat();
    MTLPixelFormat srcFormat = srcAttachment->mtlFormat();

    int dstSampleCnt = dstAttachment->sampleCount();
    int srcSampleCnt = srcAttachment->sampleCount();

    const SkIPoint dstPoint = dstRect.topLeft();
    if (this->mtlCaps().canCopyAsResolve(dstFormat, dstSampleCnt,
                                         srcFormat, srcSampleCnt,
                                         SkToBool(srcRT), src->dimensions(),
                                         srcRect, dstPoint,
                                         dstAttachment == srcAttachment)) {
        this->copySurfaceAsResolve(dst, src);
        return true;
    }

    if (srcAttachment->framebufferOnly() || dstAttachment->framebufferOnly()) {
        return false;
    }

    if (this->mtlCaps().canCopyAsBlit(dstFormat, dstSampleCnt, srcFormat, srcSampleCnt,
                                      srcRect, dstPoint, dstAttachment == srcAttachment)) {
        this->copySurfaceAsBlit(dst, src, dstAttachment, srcAttachment, srcRect, dstPoint);
        return true;
    }

    return false;
}

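// Copy strategy, summarized: prefer a resolve copy (an MSAA source into its
// single-sample counterpart) when the caps allow it, fall back to a blit when
// formats and sample counts are compatible, and otherwise return false so the
// caller can fall back to a draw.
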
bool GrMtlGpu::onWritePixels(GrSurface* surface,
                             SkIRect rect,
                             GrColorType surfaceColorType,
                             GrColorType srcColorType,
                             const GrMipLevel texels[],
                             int mipLevelCount,
                             bool prepForTexSampling) {
    GrMtlTexture* mtlTexture = static_cast<GrMtlTexture*>(surface->asTexture());
    // TODO: In principle we should be able to support pure rendertargets as well, but
    // until we find a use case we'll only support texture rendertargets.
    if (!mtlTexture) {
        return false;
    }
    if (!mipLevelCount) {
        return false;
    }
#ifdef SK_DEBUG
    for (int i = 0; i < mipLevelCount; i++) {
        SkASSERT(texels[i].fPixels);
    }
#endif
    return this->uploadToTexture(mtlTexture, rect, srcColorType, texels, mipLevelCount);
}

bool GrMtlGpu::onReadPixels(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType dstColorType,
                            void* buffer,
                            size_t rowBytes) {
    SkASSERT(surface);

    if (surfaceColorType != dstColorType) {
        return false;
    }

    int bpp = GrColorTypeBytesPerPixel(dstColorType);
    size_t transBufferRowBytes = bpp*rect.width();
    size_t transBufferImageBytes = transBufferRowBytes*rect.height();

    GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
    sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
            transBufferImageBytes,
            GrGpuBufferType::kXferGpuToCpu,
            kDynamic_GrAccessPattern,
            GrResourceProvider::ZeroInit::kNo);

    if (!transferBuffer) {
        return false;
    }

    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
    if (!this->readOrTransferPixels(surface,
                                    rect,
                                    dstColorType,
                                    grMtlBuffer->mtlBuffer(),
                                    0,
                                    transBufferImageBytes,
                                    transBufferRowBytes)) {
        return false;
    }
    this->submitCommandBuffer(kForce_SyncQueue);

    const void* mappedMemory = grMtlBuffer->mtlBuffer().contents;

    SkRectMemcpy(buffer,
                 rowBytes,
                 mappedMemory,
                 transBufferRowBytes,
                 transBufferRowBytes,
                 rect.height());

    return true;
}

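// Worked example (illustrative): reading a 100x60 kRGBA_8888 rect gives bpp = 4,
// transBufferRowBytes = 400 and transBufferImageBytes = 400*60 = 24000;
// SkRectMemcpy then re-strides those tight rows into the caller's rowBytes layout.
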
bool GrMtlGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                            size_t srcOffset,
                                            sk_sp<GrGpuBuffer> dst,
                                            size_t dstOffset,
                                            size_t size) {
    id<MTLBuffer> GR_NORETAIN mtlSrc = static_cast<GrMtlBuffer*>(src.get())->mtlBuffer();
    id<MTLBuffer> GR_NORETAIN mtlDst = static_cast<GrMtlBuffer*>(dst.get())->mtlBuffer();
    SkASSERT(mtlSrc);
    SkASSERT(mtlDst);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onTransferFromBufferToBuffer"];
#endif
    [blitCmdEncoder copyFromBuffer: mtlSrc
                      sourceOffset: srcOffset
                          toBuffer: mtlDst
                 destinationOffset: dstOffset
                              size: size];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    cmdBuffer->addGrBuffer(std::move(src));
    cmdBuffer->addGrBuffer(std::move(dst));

    return true;
}

bool GrMtlGpu::onTransferPixelsTo(GrTexture* texture,
                                  SkIRect rect,
                                  GrColorType textureColorType,
                                  GrColorType bufferColorType,
                                  sk_sp<GrGpuBuffer> transferBuffer,
                                  size_t offset,
                                  size_t rowBytes) {
    SkASSERT(texture);
    SkASSERT(transferBuffer);
    if (textureColorType != bufferColorType) {
        return false;
    }

    GrMtlTexture* grMtlTexture = static_cast<GrMtlTexture*>(texture);
    id<MTLTexture> GR_NORETAIN mtlTexture = grMtlTexture->mtlTexture();
    SkASSERT(mtlTexture);

    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
    id<MTLBuffer> GR_NORETAIN mtlBuffer = grMtlBuffer->mtlBuffer();
    SkASSERT(mtlBuffer);

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (offset % bpp) {
        return false;
    }
    if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
        return false;
    }

    MTLOrigin origin = MTLOriginMake(rect.left(), rect.top(), 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onTransferPixelsTo"];
#endif
    [blitCmdEncoder copyFromBuffer: mtlBuffer
                      sourceOffset: offset
                 sourceBytesPerRow: rowBytes
               sourceBytesPerImage: rowBytes*rect.height()
                        sourceSize: MTLSizeMake(rect.width(), rect.height(), 1)
                         toTexture: mtlTexture
                  destinationSlice: 0
                  destinationLevel: 0
                 destinationOrigin: origin];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    return true;
}

bool GrMtlGpu::onTransferPixelsFrom(GrSurface* surface,
                                    SkIRect rect,
                                    GrColorType surfaceColorType,
                                    GrColorType bufferColorType,
                                    sk_sp<GrGpuBuffer> transferBuffer,
                                    size_t offset) {
    SkASSERT(surface);
    SkASSERT(transferBuffer);

    if (surfaceColorType != bufferColorType) {
        return false;
    }

    // Metal only supports offsets that are aligned to a pixel.
    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (offset % bpp) {
        return false;
    }
    if (GrBackendFormatBytesPerPixel(surface->backendFormat()) != bpp) {
        return false;
    }

    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());

    size_t transBufferRowBytes = bpp*rect.width();
    size_t transBufferImageBytes = transBufferRowBytes*rect.height();

    return this->readOrTransferPixels(surface,
                                      rect,
                                      bufferColorType,
                                      grMtlBuffer->mtlBuffer(),
                                      offset,
                                      transBufferImageBytes,
                                      transBufferRowBytes);
}

bool GrMtlGpu::readOrTransferPixels(GrSurface* surface,
                                    SkIRect rect,
                                    GrColorType dstColorType,
                                    id<MTLBuffer> transferBuffer,
                                    size_t offset,
                                    size_t imageBytes,
                                    size_t rowBytes) {
    if (!check_max_blit_width(rect.width())) {
        return false;
    }

    id<MTLTexture> mtlTexture;
    if (GrMtlRenderTarget* rt = static_cast<GrMtlRenderTarget*>(surface->asRenderTarget())) {
        if (rt->numSamples() > 1) {
            SkASSERT(rt->requiresManualMSAAResolve()); // msaa-render-to-texture not yet supported.
            mtlTexture = rt->resolveMTLTexture();
        } else {
            SkASSERT(!rt->requiresManualMSAAResolve());
            mtlTexture = rt->colorMTLTexture();
        }
    } else if (GrMtlTexture* texture = static_cast<GrMtlTexture*>(surface->asTexture())) {
        mtlTexture = texture->mtlTexture();
    }
    if (!mtlTexture) {
        return false;
    }

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"readOrTransferPixels"];
#endif
    [blitCmdEncoder copyFromTexture: mtlTexture
                        sourceSlice: 0
                        sourceLevel: 0
                       sourceOrigin: MTLOriginMake(rect.left(), rect.top(), 0)
                         sourceSize: MTLSizeMake(rect.width(), rect.height(), 1)
                           toBuffer: transferBuffer
                  destinationOffset: offset
             destinationBytesPerRow: rowBytes
           destinationBytesPerImage: imageBytes];
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        // Sync GPU data back to the CPU
        [blitCmdEncoder synchronizeResource: transferBuffer];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    return true;
}

[[nodiscard]] std::unique_ptr<GrSemaphore> GrMtlGpu::makeSemaphore(bool /*isOwned*/) {
    SkASSERT(this->caps()->semaphoreSupport());
    return GrMtlSemaphore::Make(this);
}

std::unique_ptr<GrSemaphore> GrMtlGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                            GrSemaphoreWrapType /* wrapType */,
                                                            GrWrapOwnership /*ownership*/) {
    SkASSERT(this->caps()->backendSemaphoreSupport());
    return GrMtlSemaphore::MakeWrapped(GrBackendSemaphores::GetMtlHandler(semaphore),
                                       GrBackendSemaphores::GetMtlValue(semaphore));
}

void GrMtlGpu::insertSemaphore(GrSemaphore* semaphore) {
    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
        SkASSERT(semaphore);
        GrMtlSemaphore* mtlSem = static_cast<GrMtlSemaphore*>(semaphore);

        this->commandBuffer()->encodeSignalEvent(mtlSem->event(), mtlSem->value());
    }
}

void GrMtlGpu::waitSemaphore(GrSemaphore* semaphore) {
    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
        SkASSERT(semaphore);
        GrMtlSemaphore* mtlSem = static_cast<GrMtlSemaphore*>(semaphore);

        this->commandBuffer()->encodeWaitForEvent(mtlSem->event(), mtlSem->value());
    }
}

void GrMtlGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect&) {
    SkASSERT(target->numSamples() > 1);
    GrMtlRenderTarget* rt = static_cast<GrMtlRenderTarget*>(target);

    if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(rt)) {
        // We would have resolved the RT during the render pass.
        return;
    }

    this->resolve(static_cast<GrMtlRenderTarget*>(target)->resolveAttachment(),
                  static_cast<GrMtlRenderTarget*>(target)->colorAttachment());
}

void GrMtlGpu::resolve(GrMtlAttachment* resolveAttachment,
                       GrMtlAttachment* msaaAttachment) {
    auto renderPassDesc = [[MTLRenderPassDescriptor alloc] init];
    auto colorAttachment = renderPassDesc.colorAttachments[0];
    colorAttachment.texture = msaaAttachment->mtlTexture();
    colorAttachment.resolveTexture = resolveAttachment->mtlTexture();
    colorAttachment.loadAction = MTLLoadActionLoad;
    colorAttachment.storeAction = MTLStoreActionMultisampleResolve;

    GrMtlRenderCommandEncoder* cmdEncoder =
            this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr, nullptr);
    if (cmdEncoder) {
        cmdEncoder->setLabel(@"resolveTexture");
        this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(resolveAttachment));
        this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(msaaAttachment));
    }
}

1661 GrAttachment* dst, GrMtlAttachment* src, const SkIRect& srcRect,
1662 MTLRenderPassStencilAttachmentDescriptor* stencil) {
1663 if (!dst) {
1664 return nil;
1665 }
1666 if (!src || src->framebufferOnly()) {
1667 return nil;
1668 }
1669
1670 GrMtlAttachment* mtlDst = static_cast<GrMtlAttachment*>(dst);
1671
1672 MTLPixelFormat stencilFormat = stencil.texture.pixelFormat;
1673 auto renderPipeline = this->resourceProvider().findOrCreateMSAALoadPipeline(mtlDst->mtlFormat(),
1674 dst->numSamples(),
1675 stencilFormat);
1676
1677 // Set up rendercommandencoder
1678 auto renderPassDesc = [MTLRenderPassDescriptor new];
1679 auto colorAttachment = renderPassDesc.colorAttachments[0];
1680 colorAttachment.texture = mtlDst->mtlTexture();
1681 colorAttachment.loadAction = MTLLoadActionDontCare;
1682 colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
1683 colorAttachment.resolveTexture = src->mtlTexture();
1684
1685 renderPassDesc.stencilAttachment = stencil;
1686
1687 // We know in this case that the preceding renderCommandEncoder will not be compatible.
1688 // Either it's using a different rendertarget, or we are reading from the resolve and
1689 // hence we need to let the previous resolve finish. So we create a new one without checking.
1690 auto renderCmdEncoder =
1691 this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr);
1692 if (!renderCmdEncoder) {
1693 return nullptr;
1694 }
1695
1696 // Bind pipeline
1697 renderCmdEncoder->setRenderPipelineState(renderPipeline->mtlPipelineState());
1698 this->commandBuffer()->addResource(sk_ref_sp(renderPipeline));
1699
1700 // Bind src as input texture
1701 renderCmdEncoder->setFragmentTexture(src->mtlTexture(), 0);
1702 // No sampler needed
1703 this->commandBuffer()->addGrSurface(sk_ref_sp<GrSurface>(src));
1704
1705 // Scissor and viewport should default to size of color attachment
1706
1707 // Update and bind uniform data
1708 int w = srcRect.width();
1709 int h = srcRect.height();
1710
1711 // dst rect edges in NDC (-1 to 1)
1712 int dw = dst->width();
1713 int dh = dst->height();
1714 float dx0 = 2.f * srcRect.fLeft / dw - 1.f;
1715 float dx1 = 2.f * (srcRect.fLeft + w) / dw - 1.f;
1716 float dy0 = 2.f * srcRect.fTop / dh - 1.f;
1717 float dy1 = 2.f * (srcRect.fTop + h) / dh - 1.f;
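 // Worked example with illustrative numbers: for dst = 800x600 and
 // srcRect = {fLeft=100, fTop=50} with w=200, h=100:
 //   dx0 = 2*100/800 - 1 = -0.75    dx1 = 2*300/800 - 1 = -0.25
 //   dy0 = 2*50/600  - 1 ≈ -0.833   dy1 = 2*150/600 - 1 = -0.5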
1718
1719 struct {
1720 float posXform[4];
1721 int textureSize[2];
1722 int pad[2];
1723 } uniData = {{dx1 - dx0, dy1 - dy0, dx0, dy0}, {dw, dh}, {0, 0}};
1724
1725 constexpr size_t uniformSize = 32;
1726 if (@available(macOS 10.11, iOS 8.3, tvOS 9.0, *)) {
1727 SkASSERT(uniformSize <= this->caps()->maxPushConstantsSize());
1728 renderCmdEncoder->setVertexBytes(&uniData, uniformSize, 0);
1729 } else {
1730 // upload the data
1731 GrRingBuffer::Slice slice = this->uniformsRingBuffer()->suballocate(uniformSize);
1732 GrMtlBuffer* buffer = (GrMtlBuffer*)slice.fBuffer;
1733 char* destPtr = static_cast<char*>(slice.fBuffer->map()) + slice.fOffset;
1734 memcpy(destPtr, &uniData, uniformSize);
1735
1736 renderCmdEncoder->setVertexBuffer(buffer->mtlBuffer(), slice.fOffset, 0);
1737 }
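 // Design note: setVertexBytes embeds small one-shot data (up to 4 KB)
 // directly in the command stream, so no buffer management is needed; the
 // ring-buffer path above is only taken on OS versions that predate it.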
1738
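 // Draw a 4-vertex triangle strip; the MSAA-load vertex shader applies
 // posXform so the quad covers exactly the dst rect computed above.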
1739 renderCmdEncoder->drawPrimitives(MTLPrimitiveTypeTriangleStrip, (NSUInteger)0, (NSUInteger)4);
1740
1741 return renderCmdEncoder;
1742}
1743
1744#if defined(GR_TEST_UTILS)
1745void GrMtlGpu::testingOnly_startCapture() {
1746 if (@available(macOS 10.13, iOS 11.0, tvOS 11.0, *)) {
1747 // TODO: add Metal 3 interface as well
1748 MTLCaptureManager* captureManager = [MTLCaptureManager sharedCaptureManager];
1749 if (captureManager.isCapturing) {
1750 return;
1751 }
1752 if (@available(macOS 10.15, iOS 13.0, tvOS 13.0, *)) {
1753 MTLCaptureDescriptor* captureDescriptor = [[MTLCaptureDescriptor alloc] init];
1754 captureDescriptor.captureObject = fQueue;
1755
1756 NSError *error;
1757 if (![captureManager startCaptureWithDescriptor: captureDescriptor error:&error])
1758 {
1759 NSLog(@"Failed to start capture, error %@", error);
1760 }
1761 } else {
1762 [captureManager startCaptureWithCommandQueue: fQueue];
1763 }
1764 }
1765}
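
// A variant sketch (not in the original file): capturing to a .gputrace
// document instead of attaching to Xcode, available since macOS 10.15 /
// iOS 13.0. The output path is illustrative.
static inline void example_capture_to_file(id<MTLCommandQueue> queue) {
    if (@available(macOS 10.15, iOS 13.0, tvOS 13.0, *)) {
        MTLCaptureDescriptor* desc = [[MTLCaptureDescriptor alloc] init];
        desc.captureObject = queue;
        desc.destination = MTLCaptureDestinationGPUTraceDocument;
        desc.outputURL = [NSURL fileURLWithPath:@"/tmp/frame.gputrace"];
        NSError* error;
        if (![[MTLCaptureManager sharedCaptureManager]
                    startCaptureWithDescriptor:desc error:&error]) {
            NSLog(@"Failed to start capture, error %@", error);
        }
    }
}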
1766
1767void GrMtlGpu::testingOnly_stopCapture() {
1768 if (@available(macOS 10.13, iOS 11.0, tvOS 11.0, *)) {
1769 MTLCaptureManager* captureManager = [MTLCaptureManager sharedCaptureManager];
1770 if (captureManager.isCapturing) {
1771 [captureManager stopCapture];
1772 }
1773 }
1774}
1775#endif
1776
1777#ifdef SK_ENABLE_DUMP_GPU
1778#include "src/utils/SkJSONWriter.h"
1779void GrMtlGpu::onDumpJSON(SkJSONWriter* writer) const {
1780 // We are called by the base class, which has already called beginObject(). We choose to nest
1781 // all of our caps information in a named sub-object.
1782 writer->beginObject("Metal GPU");
1783
1784 writer->beginObject("Device");
1785 writer->appendCString("name", fDevice.name.UTF8String);
1786#ifdef SK_BUILD_FOR_MAC
1787 if (@available(macOS 10.11, *)) {
1788 writer->appendBool("isHeadless", fDevice.isHeadless);
1789 writer->appendBool("isLowPower", fDevice.isLowPower);
1790 }
1791 if (@available(macOS 10.13, *)) {
1792 writer->appendBool("isRemovable", fDevice.isRemovable);
1793 }
1794#endif
1795 if (@available(macOS 10.13, iOS 11.0, tvOS 11.0, *)) {
1796 writer->appendU64("registryID", fDevice.registryID);
1797 }
1798#if defined(SK_BUILD_FOR_MAC) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
1799 if (@available(macOS 10.15, *)) {
1800 switch (fDevice.location) {
1801 case MTLDeviceLocationBuiltIn:
1802 writer->appendNString("location", "builtIn");
1803 break;
1804 case MTLDeviceLocationSlot:
1805 writer->appendNString("location", "slot");
1806 break;
1807 case MTLDeviceLocationExternal:
1808 writer->appendNString("location", "external");
1809 break;
1810 case MTLDeviceLocationUnspecified:
1811 writer->appendNString("location", "unspecified");
1812 break;
1813 default:
1814 writer->appendNString("location", "unknown");
1815 break;
1816 }
1817 writer->appendU64("locationNumber", fDevice.locationNumber);
1818 writer->appendU64("maxTransferRate", fDevice.maxTransferRate);
1819 }
1820#endif // SK_BUILD_FOR_MAC
1821#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500 || __IPHONE_OS_VERSION_MAX_ALLOWED >= 130000
1822 if (@available(macOS 10.15, iOS 13.0, tvOS 13.0, *)) {
1823 writer->appendBool("hasUnifiedMemory", fDevice.hasUnifiedMemory);
1824 }
1825#endif
1826#ifdef SK_BUILD_FOR_MAC
1827#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
1828 if (@available(macOS 10.15, *)) {
1829 writer->appendU64("peerGroupID", fDevice.peerGroupID);
1830 writer->appendU32("peerCount", fDevice.peerCount);
1831 writer->appendU32("peerIndex", fDevice.peerIndex);
1832 }
1833#endif
1834 if (@available(macOS 10.12, *)) {
1835 writer->appendU64("recommendedMaxWorkingSetSize", fDevice.recommendedMaxWorkingSetSize);
1836 }
1837#endif // SK_BUILD_FOR_MAC
1838 if (@available(macOS 10.13, iOS 11.0, tvOS 11.0, *)) {
1839 writer->appendU64("currentAllocatedSize", fDevice.currentAllocatedSize);
1840 writer->appendU64("maxThreadgroupMemoryLength", fDevice.maxThreadgroupMemoryLength);
1841 }
1842
1843 if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
1844 writer->beginObject("maxThreadsPerThreadgroup");
1845 writer->appendU64("width", fDevice.maxThreadsPerThreadgroup.width);
1846 writer->appendU64("height", fDevice.maxThreadsPerThreadgroup.height);
1847 writer->appendU64("depth", fDevice.maxThreadsPerThreadgroup.depth);
1848 writer->endObject();
1849 }
1850
1851 if (@available(macOS 10.13, iOS 11.0, tvOS 11.0, *)) {
1852 writer->appendBool("areProgrammableSamplePositionsSupported",
1853 fDevice.areProgrammableSamplePositionsSupported);
1854 writer->appendBool("areRasterOrderGroupsSupported",
1855 fDevice.areRasterOrderGroupsSupported);
1856 }
1857#ifdef SK_BUILD_FOR_MAC
1858 if (@available(macOS 10.11, *)) {
1859 writer->appendBool("isDepth24Stencil8PixelFormatSupported",
1860 fDevice.isDepth24Stencil8PixelFormatSupported);
1861
1862 }
1863#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
1864 if (@available(macOS 10.15, *)) {
1865 writer->appendBool("areBarycentricCoordsSupported",
1866 fDevice.areBarycentricCoordsSupported);
1867 writer->appendBool("supportsShaderBarycentricCoordinates",
1868 fDevice.supportsShaderBarycentricCoordinates);
1869 }
1870#endif
1871#endif // SK_BUILD_FOR_MAC
1872 if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
1873 writer->appendU64("maxBufferLength", fDevice.maxBufferLength);
1874 }
1875 if (@available(macOS 10.13, iOS 11.0, tvOS 11.0, *)) {
1876 switch (fDevice.readWriteTextureSupport) {
1877 case MTLReadWriteTextureTier1:
1878 writer->appendNString("readWriteTextureSupport", "tier1");
1879 break;
1880 case MTLReadWriteTextureTier2:
1881 writer->appendNString("readWriteTextureSupport", "tier2");
1882 break;
1883 case MTLReadWriteTextureTierNone:
1884 writer->appendNString("readWriteTextureSupport", "tierNone");
1885 break;
1886 default:
1887 writer->appendNString("readWriteTextureSupport", "unknown");
1888 break;
1889 }
1890 switch (fDevice.argumentBuffersSupport) {
1891 case MTLArgumentBuffersTier1:
1892 writer->appendNString("argumentBuffersSupport", "tier1");
1893 break;
1894 case MTLArgumentBuffersTier2:
1895 writer->appendNString("argumentBuffersSupport", "tier2");
1896 break;
1897 default:
1898 writer->appendNString("argumentBuffersSupport", "unknown");
1899 break;
1900 }
1901 }
1902 if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
1903 writer->appendU64("maxArgumentBufferSamplerCount", fDevice.maxArgumentBufferSamplerCount);
1904 }
1905#ifdef SK_BUILD_FOR_IOS
1906 if (@available(iOS 13.0, tvOS 13.0, *)) {
1907 writer->appendU64("sparseTileSizeInBytes", fDevice.sparseTileSizeInBytes);
1908 }
1909#endif
1910 writer->endObject();
1911
1912 writer->appendCString("queue", fQueue.label.UTF8String);
1913 writer->appendBool("disconnected", fDisconnected);
1914
1915 writer->endObject();
1916}
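
// Example of the JSON shape this emits (all values illustrative):
//
//   "Metal GPU": {
//     "Device": {
//       "name": "Apple M1",
//       "hasUnifiedMemory": true,
//       "maxBufferLength": 17179869184,
//       ...
//     },
//     "queue": "skia.gpu",
//     "disconnected": false
//   }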
1917#endif
1918
1919GR_NORETAIN_END