Flutter Engine
GrVkGpu.cpp
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/core/SkMipmap.h"
#include "src/gpu/DataUtils.h"
#include "src/gpu/ganesh/SkGr.h"

#if defined(SK_USE_VMA)
#endif

using namespace skia_private;

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)
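// Factory for the Vulkan GrGpu backend. It validates the client-supplied backend context
// (instance, device, queue, proc getter), resolves the effective API version, builds the
// VulkanInterface and GrVkCaps, and sets up a memory allocator before constructing the gpu.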
std::unique_ptr<GrGpu> GrVkGpu::Make(const skgpu::VulkanBackendContext& backendContext,
                                     const GrContextOptions& options,
                                     GrDirectContext* direct) {
    if (backendContext.fInstance == VK_NULL_HANDLE ||
        backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
        backendContext.fDevice == VK_NULL_HANDLE ||
        backendContext.fQueue == VK_NULL_HANDLE) {
        return nullptr;
    }
    if (!backendContext.fGetProc) {
        return nullptr;
    }

    PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
            reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
                    backendContext.fGetProc("vkEnumerateInstanceVersion",
                                            VK_NULL_HANDLE, VK_NULL_HANDLE));
    uint32_t instanceVersion = 0;
    if (!localEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        VkResult err = localEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
            return nullptr;
        }
    }

    PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
            reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
                    backendContext.fGetProc("vkGetPhysicalDeviceProperties",
                                            backendContext.fInstance,
                                            VK_NULL_HANDLE));

    if (!localGetPhysicalDeviceProperties) {
        return nullptr;
    }
    VkPhysicalDeviceProperties physDeviceProperties;
    localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
    uint32_t physDevVersion = physDeviceProperties.apiVersion;

    uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
                                                        : instanceVersion;

    instanceVersion = std::min(instanceVersion, apiVersion);
    physDevVersion = std::min(physDevVersion, apiVersion);

    skgpu::VulkanExtensions noExtensions;
    const skgpu::VulkanExtensions* extensions =
            backendContext.fVkExtensions ? backendContext.fVkExtensions : &noExtensions;

    auto interface = sk_make_sp<skgpu::VulkanInterface>(backendContext.fGetProc,
                                                        backendContext.fInstance,
                                                        backendContext.fDevice,
                                                        instanceVersion,
                                                        physDevVersion,
                                                        extensions);
    SkASSERT(interface);
    if (!interface->validate(instanceVersion, physDevVersion, extensions)) {
        return nullptr;
    }

    sk_sp<GrVkCaps> caps;
    if (backendContext.fDeviceFeatures2) {
        caps.reset(new GrVkCaps(options,
                                interface.get(),
                                backendContext.fPhysicalDevice,
                                *backendContext.fDeviceFeatures2,
                                instanceVersion,
                                physDevVersion,
                                *extensions,
                                backendContext.fProtectedContext));
    } else if (backendContext.fDeviceFeatures) {
        VkPhysicalDeviceFeatures2 features2;
        features2.pNext = nullptr;
        features2.features = *backendContext.fDeviceFeatures;
        caps.reset(new GrVkCaps(options,
                                interface.get(),
                                backendContext.fPhysicalDevice,
                                features2,
                                instanceVersion,
                                physDevVersion,
                                *extensions,
                                backendContext.fProtectedContext));
    } else {
        VkPhysicalDeviceFeatures2 features;
        memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));

        caps.reset(new GrVkCaps(options,
                                interface.get(),
                                backendContext.fPhysicalDevice,
                                features,
                                instanceVersion,
                                physDevVersion,
                                *extensions,
                                backendContext.fProtectedContext));
    }

    if (!caps) {
        return nullptr;
    }

    sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
#if defined(SK_USE_VMA)
    if (!memoryAllocator) {
        // We were not given a memory allocator at creation
        memoryAllocator = skgpu::VulkanAMDMemoryAllocator::Make(backendContext.fInstance,
                                                                backendContext.fPhysicalDevice,
                                                                backendContext.fDevice,
                                                                physDevVersion,
                                                                extensions,
                                                                interface.get(),
                                                                skgpu::ThreadSafe::kNo);
    }
#endif
    if (!memoryAllocator) {
        SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
        return nullptr;
    }

    std::unique_ptr<GrVkGpu> vkGpu(new GrVkGpu(direct,
                                               backendContext,
                                               std::move(caps),
                                               interface,
                                               instanceVersion,
                                               physDevVersion,
                                               std::move(memoryAllocator)));
    if (backendContext.fProtectedContext == GrProtected::kYes &&
        !vkGpu->vkCaps().supportsProtectedContent()) {
        return nullptr;
    }
    return vkGpu;
}

////////////////////////////////////////////////////////////////////////////////

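// The constructor caches the physical device properties, initializes the resource provider,
// and acquires the main command pool plus its primary command buffer, which is immediately
// put into the recording state.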
GrVkGpu::GrVkGpu(GrDirectContext* direct,
                 const skgpu::VulkanBackendContext& backendContext,
                 sk_sp<GrVkCaps> caps,
                 sk_sp<const skgpu::VulkanInterface> interface,
                 uint32_t instanceVersion,
                 uint32_t physicalDeviceVersion,
                 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator)
        : INHERITED(direct)
        , fInterface(std::move(interface))
        , fMemoryAllocator(std::move(memoryAllocator))
        , fVkCaps(std::move(caps))
        , fPhysicalDevice(backendContext.fPhysicalDevice)
        , fDevice(backendContext.fDevice)
        , fQueue(backendContext.fQueue)
        , fQueueIndex(backendContext.fGraphicsQueueIndex)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fDisconnected(false)
        , fProtectedContext(backendContext.fProtectedContext)
        , fDeviceLostContext(backendContext.fDeviceLostContext)
        , fDeviceLostProc(backendContext.fDeviceLostProc) {
    SkASSERT(fMemoryAllocator);

    this->initCaps(fVkCaps);

    VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
    VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));

    fResourceProvider.init();

    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(fMainCmdBuffer);
        this->currentCommandBuffer()->begin(this);
    }
}

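// Tears down in dependency order: end and close the main command buffer and pool, wait for
// the queue to drain, then release semaphores, staging buffers, and provider-owned resources.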
void GrVkGpu::destroyResources() {
    if (fMainCmdPool) {
        fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
        fMainCmdPool->close();
    }

    // wait for all commands to finish
    VK_CALL(QueueWaitIdle(fQueue));

    if (fMainCmdPool) {
        fMainCmdPool->unref();
        fMainCmdPool = nullptr;
    }

    for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
        fSemaphoresToWaitOn[i]->unref();
    }
    fSemaphoresToWaitOn.clear();

    for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.clear();

    fStagingBufferManager.reset();

    fMSAALoadManager.destroyResources(this);

    // must call this just before we destroy the command pool and VkDevice
    fResourceProvider.destroyResources();
}

GrVkGpu::~GrVkGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
    // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
    // clients can continue to delete backend textures even after a context has been abandoned.
    fMemoryAllocator.reset();
}


void GrVkGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (!fDisconnected) {
        this->destroyResources();

        fSemaphoresToWaitOn.clear();
        fSemaphoresToSignal.clear();
        fMainCmdBuffer = nullptr;
        fDisconnected = true;
    }
}

GrThreadSafePipelineBuilder* GrVkGpu::pipelineBuilder() {
    return fResourceProvider.pipelineStateCache();
}

sk_sp<GrThreadSafePipelineBuilder> GrVkGpu::refPipelineBuilder() {
    return fResourceProvider.refPipelineStateCache();
}

///////////////////////////////////////////////////////////////////////////////

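// Returns a lazily-created, reusable GrVkOpsRenderPass configured with a framebuffer that is
// compatible with the requested load/store ops, self-dependencies, and resolve behavior.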
GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
        GrRenderTarget* rt,
        bool useMSAASurface,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const TArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    if (!fCachedOpsRenderPass) {
        fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
    }

    // For the given render target and requested render pass features we need to find a compatible
    // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
    // is compatible, but that is part of the framebuffer that we get here.
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);

    SkASSERT(!useMSAASurface ||
             rt->numSamples() > 1 ||
             (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
              vkRT->resolveAttachment() &&
              vkRT->resolveAttachment()->supportsInputAttachmentUsage()));

    // Convert the GrXferBarrierFlags into render pass self dependency flags
    GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
    if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
    }
    if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
    }

    // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
    // needed if we are using msaa to draw with a discardable msaa attachment. If we are in this
    // case we also need to update the color load/store ops since we don't want to ever load or
    // store the msaa color attachment, but may need to for the resolve attachment.
    GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
    bool withResolve = false;
    GrOpsRenderPass::LoadAndStoreInfo resolveInfo{GrLoadOp::kLoad, GrStoreOp::kStore, {}};
    GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
    if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
        withResolve = true;
        localColorInfo.fStoreOp = GrStoreOp::kDiscard;
        if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
            loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
            localColorInfo.fLoadOp = GrLoadOp::kDiscard;
        } else {
            resolveInfo.fLoadOp = GrLoadOp::kDiscard;
        }
    }

    // Get the framebuffer to use for the render pass
    sk_sp<GrVkFramebuffer> framebuffer;
    if (vkRT->wrapsSecondaryCommandBuffer()) {
        framebuffer = vkRT->externalFramebuffer();
    } else {
        auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
                                       loadFromResolve);
        framebuffer = sk_ref_sp(fb);
    }
    if (!framebuffer) {
        return nullptr;
    }

    if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
                                   stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
                                   sampledProxies)) {
        return nullptr;
    }
    return fCachedOpsRenderPass.get();
}

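// Ends the current primary command buffer, submits it to the queue with any accumulated wait
// and signal semaphores, and then starts a fresh command buffer from a new pool.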
bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());

    if (!this->currentCommandBuffer()->hasWork() && kForce_SyncQueue != sync &&
        fSemaphoresToSignal.empty() && fSemaphoresToWaitOn.empty()) {
        // We may have added finished procs during the flush call. Since there is no actual work
        // we are not submitting the command buffer and may never come back around to submit it.
        // Thus we call all current finished procs manually, since the work has technically
        // finished.
        this->currentCommandBuffer()->callFinishedProcs();
        SkASSERT(fDrawables.empty());
        fResourceProvider.checkCommandBuffers();
        return true;
    }

    fMainCmdBuffer->end(this);
    SkASSERT(fMainCmdPool);
    fMainCmdPool->close();
    bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
                                                   fSemaphoresToWaitOn);

    if (didSubmit && sync == kForce_SyncQueue) {
        fMainCmdBuffer->forceSync(this);
    }

    // We must delete any drawables that had to wait until submit to destroy.
    fDrawables.clear();

    // If we didn't submit the command buffer then we did not wait on any semaphores. We will
    // continue to hold onto these semaphores and wait on them during the next command buffer
    // submission.
    if (didSubmit) {
        for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
            fSemaphoresToWaitOn[i]->unref();
        }
        fSemaphoresToWaitOn.clear();
    }

    // Even if we did not submit the command buffer, we drop all the signal semaphores since we
    // will not try to recover the work that wasn't submitted and instead just drop it all. The
    // client will be notified that the semaphores were not submitted so that they will not try
    // to wait on them.
    for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.clear();

    // Release old command pool and create a new one
    fMainCmdPool->unref();
    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(fMainCmdBuffer);
        fMainCmdBuffer->begin(this);
    } else {
        fMainCmdBuffer = nullptr;
    }
    // We must wait to call checkCommandBuffers until after we get a new command buffer. The
    // checkCommandBuffers may trigger a releaseProc which may cause us to insert a barrier for a
    // released GrVkImage. That barrier needs to be put into a new command buffer and not the old
    // one that was just submitted.
    fResourceProvider.checkCommandBuffers();
    return didSubmit;
}

///////////////////////////////////////////////////////////////////////////////
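// Buffer creation: the debug-only switch below just validates that the requested access
// pattern is legal for the buffer type; the real work happens in GrVkBuffer::Make.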
sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size,
                                           GrGpuBufferType type,
                                           GrAccessPattern accessPattern) {
#ifdef SK_DEBUG
    switch (type) {
        case GrGpuBufferType::kVertex:
        case GrGpuBufferType::kIndex:
        case GrGpuBufferType::kDrawIndirect:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStatic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStream_GrAccessPattern);
            break;
        case GrGpuBufferType::kUniform:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
    }
#endif
    return GrVkBuffer::Make(this, size, type, accessPattern);
}

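// Writes pixel data either through a mapped linear-tiled image (base level only) or through a
// staging buffer copy for optimally tiled images, optionally transitioning the image for
// sampling afterwards.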
bool GrVkGpu::onWritePixels(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType srcColorType,
                            const GrMipLevel texels[],
                            int mipLevelCount,
                            bool prepForTexSampling) {
    GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
    if (!texture) {
        return false;
    }
    GrVkImage* texImage = texture->textureImage();

    // Make sure we have at least the base level
    if (!mipLevelCount || !texels[0].fPixels) {
        return false;
    }

    SkASSERT(!GrVkFormatIsCompressed(texImage->imageFormat()));
    bool success = false;
    bool linearTiling = texImage->isLinearTiled();
    if (linearTiling) {
        if (mipLevelCount > 1) {
            SkDebugf("Can't upload mipmap data to linear tiled texture");
            return false;
        }
        if (VK_IMAGE_LAYOUT_PREINITIALIZED != texImage->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            texImage->setImageLayout(this,
                                     VK_IMAGE_LAYOUT_GENERAL,
                                     VK_ACCESS_HOST_WRITE_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);
            if (!this->submitCommandBuffer(kForce_SyncQueue)) {
                return false;
            }
        }
        success = this->uploadTexDataLinear(texImage,
                                            rect,
                                            srcColorType,
                                            texels[0].fPixels,
                                            texels[0].fRowBytes);
    } else {
        SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
        success = this->uploadTexDataOptimal(texImage,
                                             rect,
                                             srcColorType,
                                             texels,
                                             mipLevelCount);
        if (1 == mipLevelCount) {
            texture->markMipmapsDirty();
        }
    }

    if (prepForTexSampling) {
        texImage->setImageLayout(this,
                                 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                 VK_ACCESS_SHADER_READ_BIT,
                                 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                 false);
    }

    return success;
}

// When we update vertex/index buffers via transfers we assume that they may have been used
// previously in draws and will be used again in draws afterwards. So we put a barrier before and
// after. If we had a mechanism for gathering the buffers that will be used in a GrVkOpsRenderPass
// *before* we begin a subpass we could do this lazily and non-redundantly by tracking the "last
// usage" on the GrVkBuffer. Then Pass 1 draw, xfer, xfer, xfer, Pass 2 draw would insert just two
// barriers: one before the first xfer and one before Pass 2. Currently, we'd use six barriers.
// Pass false as "after" before the transfer and true after the transfer.
static void add_transfer_dst_buffer_mem_barrier(GrVkGpu* gpu,
                                                GrVkBuffer* dst,
                                                size_t offset,
                                                size_t size,
                                                bool after) {
    if (dst->intendedType() != GrGpuBufferType::kIndex &&
        dst->intendedType() != GrGpuBufferType::kVertex) {
        return;
    }

    VkAccessFlags srcAccessMask = dst->intendedType() == GrGpuBufferType::kIndex
                                          ? VK_ACCESS_INDEX_READ_BIT
                                          : VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    VkPipelineStageFlags srcPipelineStageFlags = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
    VkPipelineStageFlags dstPipelineStageFlags = VK_PIPELINE_STAGE_TRANSFER_BIT;

    if (after) {
        using std::swap;
        swap(srcAccessMask, dstAccessMask);
        swap(srcPipelineStageFlags, dstPipelineStageFlags);
    }

    VkBufferMemoryBarrier bufferMemoryBarrier = {
            VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
            nullptr,                                  // pNext
            srcAccessMask,                            // srcAccessMask
            dstAccessMask,                            // dstAccessMask
            VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
            dst->vkBuffer(),                          // buffer
            offset,                                   // offset
            size,                                     // size
    };

    gpu->addBufferMemoryBarrier(srcPipelineStageFlags,
                                dstPipelineStageFlags,
                                /*byRegion=*/false,
                                &bufferMemoryBarrier);
}

bool GrVkGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                           size_t srcOffset,
                                           sk_sp<GrGpuBuffer> dst,
                                           size_t dstOffset,
                                           size_t size) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    VkBufferCopy copyRegion;
    copyRegion.srcOffset = srcOffset;
    copyRegion.dstOffset = dstOffset;
    copyRegion.size = size;

    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(dst.get()),
                                        dstOffset,
                                        size,
                                        /*after=*/false);
    this->currentCommandBuffer()->copyBuffer(this, std::move(src), dst, 1, &copyRegion);
    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(dst.get()),
                                        dstOffset,
                                        size,
                                        /*after=*/true);

    return true;
}

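// Buffer-to-image transfer. Note the Vulkan constraint checked below: the source offset must
// be 4-byte aligned and a multiple of the pixel size.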
bool GrVkGpu::onTransferPixelsTo(GrTexture* texture,
                                 SkIRect rect,
                                 GrColorType surfaceColorType,
                                 GrColorType bufferColorType,
                                 sk_sp<GrGpuBuffer> transferBuffer,
                                 size_t bufferOffset,
                                 size_t rowBytes) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
        return false;
    }

    // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
    if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
        return false;
    }
    GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
    if (!tex) {
        return false;
    }
    GrVkImage* vkImage = tex->textureImage();
    VkFormat format = vkImage->imageFormat();

    // Can't transfer compressed data
    SkASSERT(!GrVkFormatIsCompressed(format));

    if (!transferBuffer) {
        return false;
    }

    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    region.bufferRowLength = (uint32_t)(rowBytes/bpp);
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { rect.left(), rect.top(), 0 };
    region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };

    // Change layout of our target so it can be copied to
    vkImage->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT,
                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                            false);

    const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());

    // Copy the buffer to the image.
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    vkImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    1,
                                                    &region);
    this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));

    tex->markMipmapsDirty();
    return true;
}

bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface,
                                   SkIRect rect,
                                   GrColorType surfaceColorType,
                                   GrColorType bufferColorType,
                                   sk_sp<GrGpuBuffer> transferBuffer,
                                   size_t offset) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    if (fProtectedContext == GrProtected::kYes) {
        return false;
    }

    GrVkImage* srcImage;
    if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, as well as need us to
        // stop and start the VkRenderPass which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        if (!rt->nonMSAAAttachment()) {
            return false;
        }
        srcImage = rt->nonMSAAAttachment();
    } else {
        SkASSERT(surface->asTexture());
        srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
    }

    VkFormat format = srcImage->imageFormat();
    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(SkIRect::MakeSize(surface->dimensions()).contains(rect));

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = offset;
    region.bufferRowLength = rect.width();
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = {rect.left(), rect.top(), 0};
    region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer, 1, &region);

    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
    // Make sure the copy to buffer has finished.
    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);
    return true;
}

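// Resolves the MSAA color attachment of 'src' into 'dst' with a resolve command, transitioning
// both images into the required transfer layouts first.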
void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                           const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

    SkASSERT(dst);
    SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);

    VkImageResolve resolveInfo;
    resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
    resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
    resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};

    GrVkImage* dstImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    GrTexture* dstTex = dst->asTexture();
    if (dstTex) {
        dstImage = static_cast<GrVkTexture*>(dstTex)->textureImage();
    } else {
        SkASSERT(dst->asRenderTarget());
        dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
    }
    SkASSERT(dstImage);

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    src->colorAttachment()->setImageLayout(this,
                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                           VK_ACCESS_TRANSFER_READ_BIT,
                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                           false);
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src->colorAttachment()));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->resolveImage(this, *src->colorAttachment(), *dstImage, 1,
                                               &resolveInfo);
}

void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target->numSamples() > 1);
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
    SkASSERT(rt->resolveAttachment());

    if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
        // We would have resolved the RT during the render pass.
        return;
    }

    this->resolveImage(target, rt, resolveRect,
                       SkIPoint::Make(resolveRect.x(), resolveRect.y()));
}

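// Host-visible upload path for linear-tiled images: maps the image memory directly and copies
// row by row, so no command buffer work is needed.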
bool GrVkGpu::uploadTexDataLinear(GrVkImage* texImage,
                                  SkIRect rect,
                                  GrColorType dataColorType,
                                  const void* data,
                                  size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(texImage->isLinearTiled());

    SkASSERT(SkIRect::MakeSize(texImage->dimensions()).contains(rect));

    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
    size_t trimRowBytes = rect.width() * bpp;

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texImage->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == texImage->currentLayout());
    const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
    };
    VkSubresourceLayout layout;

    const skgpu::VulkanInterface* interface = this->vkInterface();

    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    texImage->image(),
                                                    &subres,
                                                    &layout));

    const skgpu::VulkanAlloc& alloc = texImage->alloc();
    if (VK_NULL_HANDLE == alloc.fMemory) {
        return false;
    }
    VkDeviceSize offset = rect.top()*layout.rowPitch + rect.left()*bpp;
    VkDeviceSize size = rect.height()*layout.rowPitch;
    SkASSERT(size + offset <= alloc.fSize);
    auto checkResult = [this](VkResult result) {
        return this->checkVkResult(result);
    };
    auto allocator = this->memoryAllocator();
    void* mapPtr = skgpu::VulkanMemory::MapAlloc(allocator, alloc, checkResult);
    if (!mapPtr) {
        return false;
    }
    mapPtr = reinterpret_cast<char*>(mapPtr) + offset;

    SkRectMemcpy(mapPtr,
                 static_cast<size_t>(layout.rowPitch),
                 data,
                 rowBytes,
                 trimRowBytes,
                 rect.height());

    skgpu::VulkanMemory::FlushMappedAlloc(allocator, alloc, offset, size, checkResult);
    skgpu::VulkanMemory::UnmapAlloc(allocator, alloc);

    return true;
}

// This fills in the 'regions' vector in preparation for copying a buffer to an image.
// 'individualMipOffsets' is filled in as a side-effect.
static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
                                         TArray<VkBufferImageCopy>* regions,
                                         TArray<size_t>* individualMipOffsets,
                                         GrStagingBufferManager::Slice* slice,
                                         SkTextureCompressionType compression,
                                         VkFormat vkFormat,
                                         SkISize dimensions,
                                         skgpu::Mipmapped mipmapped) {
    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    regions->reserve_exact(regions->size() + numMipLevels);
    individualMipOffsets->reserve_exact(individualMipOffsets->size() + numMipLevels);

    size_t bytesPerBlock = skgpu::VkFormatBytesPerBlock(vkFormat);

    size_t bufferSize = SkCompressedDataSize(
            compression, dimensions, individualMipOffsets, mipmapped == skgpu::Mipmapped::kYes);
    SkASSERT(individualMipOffsets->size() == numMipLevels);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to multiple of the texel size and 4
    size_t alignment = bytesPerBlock;
    switch (alignment & 0b11) {
        case 0: break;                   // alignment is already a multiple of 4.
        case 2: alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
        default: alignment *= 4; break;  // alignment is not a multiple of 2.
    }
    *slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
    if (!slice->fBuffer) {
        return 0;
    }

    for (int i = 0; i < numMipLevels; ++i) {
        VkBufferImageCopy& region = regions->push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
        SkISize revisedDimensions = skgpu::CompressedDimensions(compression, dimensions);
        region.bufferRowLength = revisedDimensions.width();
        region.bufferImageHeight = revisedDimensions.height();
        region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
        region.imageOffset = {0, 0, 0};
        region.imageExtent = {SkToU32(dimensions.width()),
                              SkToU32(dimensions.height()), 1};

        dimensions = {std::max(1, dimensions.width() /2),
                      std::max(1, dimensions.height()/2)};
    }

    return bufferSize;
}

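// Staging-buffer upload path for optimally tiled images: packs every requested mip level into
// one staging allocation and records a single copyBufferToImage with one region per level.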
bool GrVkGpu::uploadTexDataOptimal(GrVkImage* texImage,
                                   SkIRect rect,
                                   GrColorType dataColorType,
                                   const GrMipLevel texels[],
                                   int mipLevelCount) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    SkASSERT(!texImage->isLinearTiled());
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(texImage->dimensions()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(mipLevelCount == 1 || mipLevelCount == (int)texImage->mipLevels());

    SkASSERT(!rect.isEmpty());

    SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texImage));

    SkASSERT(this->vkCaps().isVkFormatTexturable(texImage->imageFormat()));
    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    // texels is const.
    // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
    // Because of this we need to make a non-const shallow copy of texels.
    AutoTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
    std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());

    TArray<size_t> individualMipOffsets;
    size_t combinedBufferSize;
    if (mipLevelCount > 1) {
        combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
                                                              rect.size(),
                                                              &individualMipOffsets,
                                                              mipLevelCount);
    } else {
        SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
        combinedBufferSize = rect.width()*rect.height()*bpp;
        individualMipOffsets.push_back(0);
    }
    SkASSERT(combinedBufferSize);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to multiple of the texel size and 4
    size_t alignment = bpp;
    switch (alignment & 0b11) {
        case 0: break;                   // alignment is already a multiple of 4.
        case 2: alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
        default: alignment *= 4; break;  // alignment is not a multiple of 2.
    }
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }

    int uploadLeft = rect.left();
    int uploadTop = rect.top();

    char* buffer = (char*) slice.fOffsetMapPtr;
    TArray<VkBufferImageCopy> regions(mipLevelCount);

    int currentWidth = rect.width();
    int currentHeight = rect.height();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping the trailing bytes
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
            region.imageOffset = {uploadLeft, uploadTop, 0};
            region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
        }

        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }

    // Change layout of our target so it can be copied to
    texImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason being is that
    // the buffer is coming from the staging manager and the staging manager will make sure the
    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
    // every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    texImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.size(),
                                                    regions.begin());
    return true;
}

// It's probably possible to roll this into uploadTexDataOptimal,
// but for now it's easier to maintain as a separate entity.
bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
                                      SkTextureCompressionType compression,
                                      VkFormat vkFormat,
                                      SkISize dimensions,
                                      skgpu::Mipmapped mipmapped,
                                      const void* data,
                                      size_t dataSize) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(data);
    SkASSERT(!uploadTexture->isLinearTiled());
    // For now the assumption is that our rect is the entire texture.
    // Compressed textures are read-only so this should be a reasonable assumption.
    SkASSERT(dimensions.fWidth == uploadTexture->width() &&
             dimensions.fHeight == uploadTexture->height());

    if (dimensions.fWidth == 0 || dimensions.fHeight == 0) {
        return false;
    }

    SkASSERT(uploadTexture->imageFormat() == vkFormat);
    SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));


    GrStagingBufferManager::Slice slice;
    TArray<VkBufferImageCopy> regions;
    TArray<size_t> individualMipOffsets;
    SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
                                                                        &regions,
                                                                        &individualMipOffsets,
                                                                        &slice,
                                                                        compression,
                                                                        vkFormat,
                                                                        dimensions,
                                                                        mipmapped);
    if (!slice.fBuffer) {
        return false;
    }
    SkASSERT(dataSize == combinedBufferSize);

    {
        char* buffer = (char*)slice.fOffsetMapPtr;
        memcpy(buffer, data, dataSize);
    }

    // Change layout of our target so it can be copied to
    uploadTexture->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason being is that
    // the buffer is coming from the staging manager and the staging manager will make sure the
    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
    // every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    uploadTexture,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.size(),
                                                    regions.begin());

    return true;
}

////////////////////////////////////////////////////////////////////////////////
// TODO: make this take a skgpu::Mipmapped
sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
                                          const GrBackendFormat& format,
                                          GrRenderable renderable,
                                          int renderTargetSampleCnt,
                                          skgpu::Budgeted budgeted,
                                          GrProtected isProtected,
                                          int mipLevelCount,
                                          uint32_t levelClearMask,
                                          std::string_view label) {
    VkFormat pixelFormat;
    SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
    SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
    SkASSERT(mipLevelCount > 0);

    GrMipmapStatus mipmapStatus =
            mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;

    sk_sp<GrVkTexture> tex;
    if (renderable == GrRenderable::kYes) {
        tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
                mipmapStatus, isProtected, label);
    } else {
        tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
                                          mipLevelCount, isProtected, mipmapStatus, label);
    }

    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        if (!this->currentCommandBuffer()) {
            return nullptr;
        }
        STArray<1, VkImageSubresourceRange> ranges;
        bool inRange = false;
        GrVkImage* texImage = tex->textureImage();
        for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
            if (levelClearMask & (1U << i)) {
                if (inRange) {
                    ranges.back().levelCount++;
                } else {
                    auto& range = ranges.push_back();
                    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
                    range.baseArrayLayer = 0;
                    range.baseMipLevel = i;
                    range.layerCount = 1;
                    range.levelCount = 1;
                    inRange = true;
                }
            } else if (inRange) {
                inRange = false;
            }
        }
        SkASSERT(!ranges.empty());
        static constexpr VkClearColorValue kZeroClearColor = {};
        texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                 false);
        this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
                                                      ranges.size(), ranges.begin());
    }
    return tex;
}

sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
                                                    const GrBackendFormat& format,
                                                    skgpu::Budgeted budgeted,
                                                    skgpu::Mipmapped mipmapped,
                                                    GrProtected isProtected,
                                                    const void* data,
                                                    size_t dataSize) {
    VkFormat pixelFormat;
    SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));

    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
    }

    GrMipmapStatus mipmapStatus = (mipmapped == skgpu::Mipmapped::kYes)
                                          ? GrMipmapStatus::kValid
                                          : GrMipmapStatus::kNotAllocated;

    auto tex = GrVkTexture::MakeNewTexture(this,
                                           budgeted,
                                           dimensions,
                                           pixelFormat,
                                           numMipLevels,
                                           isProtected,
                                           mipmapStatus,
                                           /*label=*/"VkGpu_CreateCompressedTexture");
    if (!tex) {
        return nullptr;
    }

    SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
    if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
                                       dimensions, mipmapped, data, dataSize)) {
        return nullptr;
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
                           VkDeviceSize offset, VkDeviceSize size) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(buffer.get()),
                                        offset,
                                        size,
                                        /*after=*/false);
    this->currentCommandBuffer()->updateBuffer(this, buffer, offset, size, src);
    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(buffer.get()),
                                        offset,
                                        size,
                                        /*after=*/true);

    return true;
}

bool GrVkGpu::zeroBuffer(sk_sp<GrGpuBuffer> buffer) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(buffer.get()),
                                        /*offset=*/0,
                                        buffer->size(),
                                        /*after=*/false);
    this->currentCommandBuffer()->fillBuffer(this,
                                             buffer,
                                             /*offset=*/0,
                                             buffer->size(),
                                             /*data=*/0);
    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(buffer.get()),
                                        /*offset=*/0,
                                        buffer->size(),
                                        /*after=*/true);

    return true;
}

////////////////////////////////////////////////////////////////////////////////

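// Validation helpers for wrapped backend images. These reject images that Ganesh cannot use
// safely: missing transfer usage, unsupported queue families, or YCbCr/tiling features the
// caps don't advertise.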
static bool check_image_info(const GrVkCaps& caps,
                             const GrVkImageInfo& info,
                             bool needsAllocation,
                             uint32_t graphicsQueueIndex) {
    if (VK_NULL_HANDLE == info.fImage) {
        return false;
    }

    if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
        return false;
    }

    if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
        return false;
    }

    if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (info.fCurrentQueueFamily != graphicsQueueIndex) {
                return false;
            }
        } else {
            return false;
        }
    }

    if (info.fYcbcrConversionInfo.isValid()) {
        if (!caps.supportsYcbcrConversion()) {
            return false;
        }
        if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
            return true;
        }
    }

    // We currently require everything to be made with transfer bits set
    if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
        !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
        return false;
    }

    return true;
}

static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
    // We don't support directly importing multisampled textures for sampling from shaders.
    if (info.fSampleCount != 1) {
        return false;
    }

    if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
        return true;
    }
    if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
        if (!caps.isVkFormatTexturable(info.fFormat)) {
            return false;
        }
    } else if (info.fImageTiling == VK_IMAGE_TILING_LINEAR) {
        if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
            return false;
        }
    } else if (info.fImageTiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
        if (!caps.supportsDRMFormatModifiers()) {
            return false;
        }
        // To be technically correct we should query the vulkan support for VkFormat and
        // drmFormatModifier pairs to confirm the required feature support is there. However, we
        // currently don't have our caps and format tables set up to do this efficiently. So
        // instead we just rely on the client's passed in VkImageUsageFlags and assume they were
        // set up using valid features (checked below). In practice this should all be safe
        // because currently we are setting all drm format modifier textures to have a
        // GrTextureType::kExternal so we just really need to be able to read these video VkImage
        // in a shader. The video decoder isn't going to give us VkImages that don't support being
        // sampled.
    } else {
        return false;
    }

    // We currently require all textures to be made with sample support
    if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
        return false;
    }

    return true;
}

static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) {
    if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
        return false;
    }
    if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
        return false;
    }
    return true;
}

sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                               GrWrapOwnership ownership,
                                               GrWrapCacheable cacheable,
                                               GrIOType ioType) {
    GrVkImageInfo imageInfo;
    if (!GrBackendTextures::GetVkImageInfo(backendTex, &imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
                          this->queueIndex())) {
        return nullptr;
    }

    if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }

    if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
    SkASSERT(mutableState);
    return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
                                           ioType, imageInfo, std::move(mutableState));
}

sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
}

sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                         int sampleCnt,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    GrVkImageInfo imageInfo;
    if (!GrBackendTextures::GetVkImageInfo(backendTex, &imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
                          this->queueIndex())) {
        return nullptr;
    }

    if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }
    // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
    // the wrapped VkImage.
    bool resolveOnly = sampleCnt > 1;
    if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
        return nullptr;
    }

    if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);

    sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
    SkASSERT(mutableState);

    return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
                                                                   sampleCnt, ownership, cacheable,
                                                                   imageInfo,
                                                                   std::move(mutableState));
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    GrVkImageInfo info;
    if (!GrBackendRenderTargets::GetVkImageInfo(backendRT, &info)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
        return nullptr;
    }

    // We will always render directly to this VkImage.
    static bool kResolveOnly = false;
    if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
        return nullptr;
    }

    if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sk_sp<skgpu::MutableTextureState> mutableState = backendRT.getMutableState();
    SkASSERT(mutableState);

    sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
            this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));

    // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
    SkASSERT(!backendRT.stencilBits());
    if (tgt) {
        SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
    }

    return tgt;
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
        const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
    int maxSize = this->caps()->maxTextureSize();
    if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
        return nullptr;
    }

    GrBackendFormat backendFormat = GrBackendFormats::MakeVk(vkInfo.fFormat);
    if (!backendFormat.isValid()) {
        return nullptr;
    }
    int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
    if (!sampleCnt) {
        return nullptr;
    }

    return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
}

bool GrVkGpu::loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
                                  const GrVkRenderPass& renderPass,
                                  GrAttachment* dst,
                                  GrVkImage* src,
                                  const SkIRect& srcRect) {
    return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
}

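// Regenerates mip levels on the GPU by blitting each level from the previous one, walking the
// chain level by level and toggling layouts with explicit image memory barriers.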
bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
    // don't do anything for linearly tiled textures (can't have mipmaps)
    if (vkTex->isLinearTiled()) {
        SkDebugf("Trying to create mipmap for linear tiled texture");
        return false;
    }
    SkASSERT(tex->textureType() == GrTextureType::k2D);

    // determine if we can blit to and from this format
    const GrVkCaps& caps = this->vkCaps();
    if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
        !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
        !caps.mipmapSupport()) {
        return false;
    }

    int width = tex->width();
    int height = tex->height();
    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));

    // SkMipmap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
    SkASSERT(levelCount == vkTex->mipLevels());

    // change layout of the layers so we can write to them.
    vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // setup memory barrier
    SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
            nullptr,                                 // pNext
            VK_ACCESS_TRANSFER_WRITE_BIT,            // srcAccessMask
            VK_ACCESS_TRANSFER_READ_BIT,             // dstAccessMask
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,    // oldLayout
            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,    // newLayout
            VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
            vkTex->image(),                          // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
    };

    // Blit the miplevels
    uint32_t mipLevel = 1;
    while (mipLevel < levelCount) {
        int prevWidth = width;
        int prevHeight = height;
        width = std::max(1, width / 2);
        height = std::max(1, height / 2);

        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);

        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };
        this->currentCommandBuffer()->blitImage(this,
                                                vkTex->resource(),
                                                vkTex->image(),
                                                VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                vkTex->resource(),
                                                vkTex->image(),
                                                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                1,
                                                &blitRegion,
                                                VK_FILTER_LINEAR);
        ++mipLevel;
    }
    if (levelCount > 1) {
        // This barrier logically is not needed, but it changes the final level to the same layout
        // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
        // layouts and future layout changes easier. The alternative here would be to track layout
        // and memory accesses per layer which doesn't seem worth it.
        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
        vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

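// Attachment factories: stencil attachments use the caps-preferred stencil format; MSAA color
// attachments are created from the requested backend format and sample count.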
sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                   SkISize dimensions, int numStencilSamples) {
    VkFormat sFmt = this->vkCaps().preferredStencilFormat();

    fStats.incStencilAttachmentCreates();
    return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
}

sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                int numSamples,
                                                GrProtected isProtected,
                                                GrMemoryless memoryless) {
    VkFormat pixelFormat;
    SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
    SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
    SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));

    fStats.incMSAAAttachmentCreates();
    return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
}

////////////////////////////////////////////////////////////////////////////////

bool copy_src_data(char* mapPtr,
                   VkFormat vkFormat,
                   const TArray<size_t>& individualMipOffsets,
                   const GrPixmap srcData[],
                   int numMipLevels) {
    SkASSERT(srcData && numMipLevels);
    SkASSERT(!GrVkFormatIsCompressed(vkFormat));
    SkASSERT(individualMipOffsets.size() == numMipLevels);
    SkASSERT(mapPtr);

    size_t bytesPerPixel = skgpu::VkFormatBytesPerBlock(vkFormat);

    for (int level = 0; level < numMipLevels; ++level) {
        const size_t trimRB = srcData[level].info().width() * bytesPerPixel;

        SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
                     srcData[level].addr(), srcData[level].rowBytes(),
                     trimRB, srcData[level].height());
    }
    return true;
}

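// Shared helper for the createBackendTexture/RenderTarget entry points: picks usage flags from
// the texturable/renderable request and initializes a new VkImage described by ImageDesc.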
bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
                                             SkISize dimensions,
                                             int sampleCnt,
                                             GrTexturable texturable,
                                             GrRenderable renderable,
                                             skgpu::Mipmapped mipmapped,
                                             GrVkImageInfo* info,
                                             GrProtected isProtected) {
    SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);

    if (fProtectedContext != isProtected) {
        return false;
    }

    if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) {
        return false;
    }

    // MSAA images are only currently used by createTestingOnlyBackendRenderTarget.
    if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) {
        return false;
    }

    if (renderable == GrRenderable::kYes) {
        sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
        if (!sampleCnt) {
            return false;
        }
    }


    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (texturable == GrTexturable::kYes) {
        usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
    }
    if (renderable == GrRenderable::kYes) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments
        usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = vkFormat;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = numMipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    imageDesc.fIsProtected = fProtectedContext;

    if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
        SkDebugf("Failed to init image info\n");
        return false;
    }

    return true;
}

bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
                                    sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                    std::array<float, 4> color) {
    GrVkImageInfo info;
    SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));

    sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
    SkASSERT(mutableState);
    sk_sp<GrVkTexture> texture =
            GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
                                            kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
                                            kRW_GrIOType, info, std::move(mutableState));
    if (!texture) {
        return false;
    }
    GrVkImage* texImage = texture->textureImage();

    GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
    if (!cmdBuffer) {
        return false;
    }

    texImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // CmdClearColorImage doesn't work for compressed formats
    SkASSERT(!GrVkFormatIsCompressed(info.fFormat));

    VkClearColorValue vkColor;
    // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
    // uint32 union members in those cases.
    vkColor.float32[0] = color[0];
    vkColor.float32[1] = color[1];
    vkColor.float32[2] = color[2];
    vkColor.float32[3] = color[3];

    VkImageSubresourceRange range;
    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    range.baseArrayLayer = 0;
    range.baseMipLevel = 0;
    range.layerCount = 1;
    range.levelCount = info.fLevelCount;
    cmdBuffer->clearColorImage(this, texImage, &vkColor, 1, &range);

    // Change image layout to shader read since if we use this texture as a borrowed
    // texture within Ganesh we require that its layout be set to that
    texImage->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                             VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                             false);

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}

GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
                                                 const GrBackendFormat& format,
                                                 GrRenderable renderable,
                                                 skgpu::Mipmapped mipmapped,
                                                 GrProtected isProtected,
                                                 std::string_view label) {
    const GrVkCaps& caps = this->vkCaps();

    if (fProtectedContext != isProtected) {
        return {};
    }

    VkFormat vkFormat;
    if (!GrBackendFormats::AsVkFormat(format, &vkFormat)) {
        return {};
    }

    // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
    if (!caps.isVkFormatTexturable(vkFormat)) {
        return {};
    }

    if (skgpu::VkFormatNeedsYcbcrSampler(vkFormat)) {
        return {};
    }

    GrVkImageInfo info;
    if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
                                              renderable, mipmapped, &info, isProtected)) {
        return {};
    }

    return GrBackendTextures::MakeVk(dimensions.width(), dimensions.height(), info);
}

GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(SkISize dimensions,
                                                           const GrBackendFormat& format,
                                                           skgpu::Mipmapped mipmapped,
                                                           GrProtected isProtected) {
    return this->onCreateBackendTexture(dimensions,
                                        format,
                                        GrRenderable::kNo,
                                        mipmapped,
                                        isProtected,
                                        /*label=*/"VkGpu_CreateCompressedBackendTexture");
}

bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                               sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                               const void* data,
                                               size_t size) {
    GrVkImageInfo info;
    SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));

    sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
    SkASSERT(mutableState);
    sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
                                                                 backendTexture.dimensions(),
                                                                 kBorrow_GrWrapOwnership,
                                                                 GrWrapCacheable::kNo,
                                                                 kRW_GrIOType,
                                                                 info,
                                                                 std::move(mutableState));
    if (!texture) {
        return false;
    }

    GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
    if (!cmdBuffer) {
        return false;
    }
    GrVkImage* image = texture->textureImage();
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                          VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    SkTextureCompressionType compression =
            GrBackendFormatToCompressionType(backendTexture.getBackendFormat());

    TArray<VkBufferImageCopy> regions;
    TArray<size_t> individualMipOffsets;
    GrStagingBufferManager::Slice slice;

    fill_in_compressed_regions(&fStagingBufferManager,
                               &regions,
                               &individualMipOffsets,
                               &slice,
                               compression,
                               info.fFormat,
                               backendTexture.dimensions(),
                               backendTexture.fMipmapped);

    if (!slice.fBuffer) {
        return false;
    }

    memcpy(slice.fOffsetMapPtr, data, size);

    cmdBuffer->addGrSurface(texture);
    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason being is that
    // the buffer is coming from the staging manager and the staging manager will make sure the
    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
    // every upload in the frame.
    cmdBuffer->copyBufferToImage(this,
                                 static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
                                 image,
                                 image->currentLayout(),
                                 regions.size(),
                                 regions.begin());

    // Change image layout to shader read since if we use this texture as a borrowed
    // texture within Ganesh we require that its layout be set to that
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                          VK_ACCESS_SHADER_READ_BIT,
                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                          false);

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}

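// Helper for the setBackendTextureState/setBackendRenderTargetState entry points below; applies
// a client-requested layout and/or queue family transition to a wrapped image.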
1900 VkImageLayout newLayout,
1901 uint32_t newQueueFamilyIndex) {
1902 // Even though internally we use this helper for getting src access flags and stages they
1903 // can also be used for general dst flags since we don't know exactly what the client
1904 // plans on using the image for.
1905 if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
1906 newLayout = image->currentLayout();
1907 }
1909 VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);
1910
1911 uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
1912 auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
1913 return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
1914 queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
1915 };
1916 if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
1917 // It is illegal to have both the new and old queue be special queue families (i.e. external
1918 // or foreign).
1919 return;
1920 }
1921
1922 image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
1923 newQueueFamilyIndex);
1924}
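// Note: VK_QUEUE_FAMILY_EXTERNAL and VK_QUEUE_FAMILY_FOREIGN_EXT stand for queues outside
// this VkDevice. Vulkan queue-family ownership transfers must name a concrete family on at
// least one side, which is why a special-to-special transfer is rejected above.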
1925
1926bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
1927 sk_sp<skgpu::MutableTextureState> currentState,
1928 SkISize dimensions,
1929 VkImageLayout newLayout,
1930 uint32_t newQueueFamilyIndex,
1931 skgpu::MutableTextureState* previousState,
1932 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1933 sk_sp<GrVkImage> texture = GrVkImage::MakeWrapped(this,
1934 dimensions,
1935 info,
1936 std::move(currentState),
1937 GrVkImage::UsageFlags::kColorAttachment,
1938 kBorrow_GrWrapOwnership,
1939 GrWrapCacheable::kNo,
1940 "VkGpu_SetBackendSurfaceState",
1941 /*forSecondaryCB=*/false);
1942 SkASSERT(texture);
1943 if (!texture) {
1944 return false;
1945 }
1946 if (previousState) {
1947 previousState->set(*texture->getMutableState());
1948 }
1949 set_layout_and_queue_from_mutable_state(this, texture.get(), newLayout, newQueueFamilyIndex);
1950 if (finishedCallback) {
1951 this->addFinishedCallback(std::move(finishedCallback));
1952 }
1953 return true;
1954}
1955
1956bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
1957 const skgpu::MutableTextureState& newState,
1958 skgpu::MutableTextureState* previousState,
1959 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1960 GrVkImageInfo info;
1961 SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));
1962 sk_sp<skgpu::MutableTextureState> currentState = backendTexture.getMutableState();
1963 SkASSERT(currentState);
1964 SkASSERT(newState.isValid() && newState.backend() == skgpu::BackendApi::kVulkan);
1965 return this->setBackendSurfaceState(info, std::move(currentState), backendTexture.dimensions(),
1966 skgpu::MutableTextureStates::GetVkImageLayout(newState),
1967 skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState),
1968 previousState,
1969 std::move(finishedCallback));
1970}
1971
1972bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1973 const skgpu::MutableTextureState& newState,
1974 skgpu::MutableTextureState* previousState,
1975 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1976 GrVkImageInfo info;
1977 SkAssertResult(GrBackendRenderTargets::GetVkImageInfo(backendRenderTarget, &info));
1978 sk_sp<skgpu::MutableTextureState> currentState = backendRenderTarget.getMutableState();
1979 SkASSERT(currentState);
1980 SkASSERT(newState.isValid() && newState.backend() == skgpu::BackendApi::kVulkan);
1981 return this->setBackendSurfaceState(info, std::move(currentState),
1982 backendRenderTarget.dimensions(),
1983 skgpu::MutableTextureStates::GetVkImageLayout(newState),
1984 skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState),
1985 previousState, std::move(finishedCallback));
1986}
1987
1988void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
1989 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
1990 VkPipelineStageFlags dstStage;
1991 VkAccessFlags dstAccess;
1992 if (barrierType == kBlend_GrXferBarrierType) {
1993 dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
1994 dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
1995 } else {
1996 SkASSERT(barrierType == kTexture_GrXferBarrierType);
1997 dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
1998 dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
1999 }
2000 GrVkImage* image = vkRT->colorAttachment();
2001 VkImageMemoryBarrier barrier;
2002 barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
2003 barrier.pNext = nullptr;
2004 barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
2005 barrier.dstAccessMask = dstAccess;
2006 barrier.oldLayout = image->currentLayout();
2007 barrier.newLayout = barrier.oldLayout;
2008 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
2009 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
2010 barrier.image = image->image();
2011 barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
2012 this->addImageMemoryBarrier(image->resource(),
2013 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
2014 dstStage, true, &barrier);
2015}
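// Note: since oldLayout == newLayout, the barrier above performs no layout transition; it
// is purely a self-dependency memory barrier making prior color-attachment writes visible
// to the fragment-stage reads used by advanced blends and input attachments.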
2016
2017void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
2018 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2019
2020 GrVkImageInfo info;
2021 if (GrBackendTextures::GetVkImageInfo(tex, &info)) {
2022 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2023 }
2024}
2025
2026bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
2027 GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
2028 GrVkRenderPass::AttachmentFlags attachmentFlags;
2029 GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
2030 &attachmentsDescriptor, &attachmentFlags);
2031
2032 GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
2033 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
2034 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
2035 }
2036 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
2037 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
2038 }
2039
2040 GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
2041 if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
2042 programInfo.colorLoadOp() == GrLoadOp::kLoad) {
2043 loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
2044 }
2045 sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
2046 &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
2047 if (!renderPass) {
2048 return false;
2049 }
2050
2051 GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
2052
2053 auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
2054 desc,
2055 programInfo,
2056 renderPass->vkRenderPass(),
2057 &stat);
2058 if (!pipelineState) {
2059 return false;
2060 }
2061
2062 return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
2063}
2064
2065#if defined(GR_TEST_UTILS)
2066bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
2067 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2068
2069 GrVkImageInfo backend;
2070 if (!GrBackendTextures::GetVkImageInfo(tex, &backend)) {
2071 return false;
2072 }
2073
2074 if (backend.fImage && backend.fAlloc.fMemory) {
2075 VkMemoryRequirements req;
2076 memset(&req, 0, sizeof(req));
2077 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
2078 backend.fImage,
2079 &req));
2080 // TODO: find a better check
2081 // This will probably fail with a different driver
2082 return (req.size > 0) && (req.size <= 8192 * 8192);
2083 }
2084
2085 return false;
2086}
2087
2088GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
2089 GrColorType ct,
2090 int sampleCnt,
2091 GrProtected isProtected) {
2092 if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
2093 dimensions.height() > this->caps()->maxRenderTargetSize()) {
2094 return {};
2095 }
2096
2097 VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);
2098
2099 GrVkImageInfo info;
2100 if (!this->createVkImageForBackendSurface(vkFormat,
2101 dimensions,
2102 sampleCnt,
2103 GrTexturable::kNo,
2104 GrRenderable::kYes,
2105 skgpu::Mipmapped::kNo,
2106 &info,
2107 isProtected)) {
2108 return {};
2109 }
2110 return GrBackendRenderTargets::MakeVk(dimensions.width(), dimensions.height(), info);
2111}
2112
2113void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
2114 SkASSERT(GrBackendApi::kVulkan == rt.fBackend);
2115
2116 GrVkImageInfo info;
2117 if (GrBackendRenderTargets::GetVkImageInfo(rt, &info)) {
2118 // something in the command buffer may still be using this, so force submit
2119 SkAssertResult(this->submitCommandBuffer(kForce_SyncQueue));
2120 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2121 }
2122}
2123#endif
2124
2125////////////////////////////////////////////////////////////////////////////////
2126
2127void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
2128 VkPipelineStageFlags srcStageMask,
2129 VkPipelineStageFlags dstStageMask,
2130 bool byRegion,
2131 VkBufferMemoryBarrier* barrier) const {
2132 if (!this->currentCommandBuffer()) {
2133 return;
2134 }
2135 SkASSERT(resource);
2136 this->currentCommandBuffer()->pipelineBarrier(this,
2137 resource,
2138 srcStageMask,
2139 dstStageMask,
2140 byRegion,
2141 GrVkCommandBuffer::kBufferMemory_BarrierType,
2142 barrier);
2143}
2144void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
2145 VkPipelineStageFlags dstStageMask,
2146 bool byRegion,
2147 VkBufferMemoryBarrier* barrier) const {
2148 if (!this->currentCommandBuffer()) {
2149 return;
2150 }
2151 // We don't pass a resource to the command buffer here. The command buffer would only use it
2152 // to hold a ref, and every place where we add a buffer memory barrier we are also recording
2153 // some other command with the buffer, so those other commands already cause the command
2154 // buffer to hold a ref to the buffer.
2155 this->currentCommandBuffer()->pipelineBarrier(this,
2156 /*resource=*/nullptr,
2157 srcStageMask,
2158 dstStageMask,
2159 byRegion,
2160 GrVkCommandBuffer::kBufferMemory_BarrierType,
2161 barrier);
2162}
2163
2164void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
2165 VkPipelineStageFlags srcStageMask,
2166 VkPipelineStageFlags dstStageMask,
2167 bool byRegion,
2168 VkImageMemoryBarrier* barrier) const {
2169 // If we are in the middle of destroying or abandoning the context we may hit a release proc
2170 // that triggers the destruction of a GrVkImage. This could cause us to try to transfer the
2171 // VkImage back to the original queue. In this state we don't submit any more work, and we may
2172 // not have a current command buffer. Thus we won't do the queue transfer.
2173 if (!this->currentCommandBuffer()) {
2174 return;
2175 }
2176 SkASSERT(resource);
2177 this->currentCommandBuffer()->pipelineBarrier(this,
2178 resource,
2179 srcStageMask,
2180 dstStageMask,
2181 byRegion,
2182 GrVkCommandBuffer::kImageMemory_BarrierType,
2183 barrier);
2184}
2185
2186void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
2187 SkSpan<GrSurfaceProxy*> proxies,
2188 SkSurfaces::BackendSurfaceAccess access,
2189 const skgpu::MutableTextureState* newState) {
2190 // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
2191 // not affect what we do here.
2192 if (!proxies.empty() && (access == SkSurfaces::BackendSurfaceAccess::kPresent || newState)) {
2193 // We currently don't support passing in new surface state for multiple proxies here. The
2194 // only time we have multiple proxies is if we are flushing a YUV SkImage, which won't have
2195 // state updates anyway. Additionally, if we have a newState then we must not have any
2196 // BackendSurfaceAccess.
2197 SkASSERT(!newState || proxies.size() == 1);
2198
2199 GrVkImage* image;
2200 for (GrSurfaceProxy* proxy : proxies) {
2201 SkASSERT(proxy->isInstantiated());
2202 if (GrTexture* tex = proxy->peekTexture()) {
2203 image = static_cast<GrVkTexture*>(tex)->textureImage();
2204 } else {
2205 GrRenderTarget* rt = proxy->peekRenderTarget();
2206 SkASSERT(rt);
2207 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2208 image = vkRT->externalAttachment();
2209 }
2210 if (newState) {
2211 VkImageLayout newLayout =
2212 skgpu::MutableTextureStates::GetVkImageLayout(newState);
2213 uint32_t newIndex =
2214 skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState);
2215 set_layout_and_queue_from_mutable_state(this, image, newLayout, newIndex);
2216 } else {
2217 SkASSERT(access == SkSurfaces::BackendSurfaceAccess::kPresent);
2218 image->prepareForPresent(this);
2219 }
2220 }
2221 }
2222}
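// Note: for the kPresent path, prepareForPresent() is expected to move the image to
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR (and back to an external queue family if required),
// the layout the swapchain needs before vkQueuePresentKHR can use the image.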
2223
2224void GrVkGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
2225 GrGpuFinishedContext finishedContext) {
2226 SkASSERT(finishedProc);
2227 this->addFinishedCallback(skgpu::RefCntedCallback::Make(finishedProc, finishedContext));
2228}
2229
2230void GrVkGpu::addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback) {
2231 SkASSERT(finishedCallback);
2232 fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
2233}
2234
2235void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
2236 this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
2237}
2238
2239bool GrVkGpu::onSubmitToGpu(GrSyncCpu sync) {
2240 if (sync == GrSyncCpu::kYes) {
2241 return this->submitCommandBuffer(kForce_SyncQueue);
2242 } else {
2243 return this->submitCommandBuffer(kSkip_SyncQueue);
2244 }
2245}
2246
2247void GrVkGpu::finishOutstandingGpuWork() {
2248 VK_CALL(QueueWaitIdle(fQueue));
2249
2250 if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
2251 fResourceProvider.forceSyncAllCommandBuffers();
2252 }
2253}
2254
2255void GrVkGpu::onReportSubmitHistograms() {
2256#if SK_HISTOGRAMS_ENABLED
2257 uint64_t allocatedMemory = 0, usedMemory = 0;
2258 std::tie(allocatedMemory, usedMemory) = fMemoryAllocator->totalAllocatedAndUsedMemory();
2259 SkASSERT(usedMemory <= allocatedMemory);
2260 if (allocatedMemory > 0) {
2261 SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
2262 (usedMemory * 100) / allocatedMemory);
2263 }
2264 // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
2265 // supports samples up to around 500MB, which should cover the amounts of memory we allocate.
2266 SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
2267#endif // SK_HISTOGRAMS_ENABLED
2268}
2269
2270void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
2271 GrSurface* src,
2272 GrVkImage* dstImage,
2273 GrVkImage* srcImage,
2274 const SkIRect& srcRect,
2275 const SkIPoint& dstPoint) {
2276 if (!this->currentCommandBuffer()) {
2277 return;
2278 }
2279
2280#ifdef SK_DEBUG
2281 int dstSampleCnt = dstImage->numSamples();
2282 int srcSampleCnt = srcImage->numSamples();
2283 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2284 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2285 VkFormat dstFormat = dstImage->imageFormat();
2286 VkFormat srcFormat;
2287 SkAssertResult(GrBackendFormats::AsVkFormat(src->backendFormat(), &srcFormat));
2288 SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2289 srcFormat, srcSampleCnt, srcHasYcbcr));
2290#endif
2291 if (src->isProtected() && !dst->isProtected()) {
2292 SkDebugf("Can't copy from protected memory to non-protected");
2293 return;
2294 }
2295
2296 // These flags are for flushing/invalidating caches; for the dst image it doesn't matter if
2297 // the cache is flushed since it is only being written to.
2298 dstImage->setImageLayout(this,
2299 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2300 VK_ACCESS_TRANSFER_WRITE_BIT,
2301 VK_PIPELINE_STAGE_TRANSFER_BIT,
2302 false);
2303
2304 srcImage->setImageLayout(this,
2305 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2306 VK_ACCESS_TRANSFER_READ_BIT,
2307 VK_PIPELINE_STAGE_TRANSFER_BIT,
2308 false);
2309
2310 VkImageCopy copyRegion;
2311 memset(&copyRegion, 0, sizeof(VkImageCopy));
2312 copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2313 copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
2314 copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2315 copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
2316 copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };
2317
2318 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2319 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2320 this->currentCommandBuffer()->copyImage(this,
2321 srcImage,
2322 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2323 dstImage,
2324 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2325 1,
2326 &copyRegion);
2327
2328 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2329 srcRect.width(), srcRect.height());
2330 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2331 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2332}
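// Note: vkCmdCopyImage is a raw, non-scaling copy and requires size-compatible formats and
// matching sample counts (checked by canCopyImage() above); copies that need scaling or
// format conversion fall through to copySurfaceAsBlit() instead.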
2333
2334void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
2335 GrSurface* src,
2336 GrVkImage* dstImage,
2337 GrVkImage* srcImage,
2338 const SkIRect& srcRect,
2339 const SkIRect& dstRect,
2340 GrSamplerState::Filter filter) {
2341 if (!this->currentCommandBuffer()) {
2342 return;
2343 }
2344
2345#ifdef SK_DEBUG
2346 int dstSampleCnt = dstImage->numSamples();
2347 int srcSampleCnt = srcImage->numSamples();
2348 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2349 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2350 VkFormat dstFormat = dstImage->imageFormat();
2351 VkFormat srcFormat;
2352 SkAssertResult(GrBackendFormats::AsVkFormat(src->backendFormat(), &srcFormat));
2353 SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat,
2354 dstSampleCnt,
2355 dstImage->isLinearTiled(),
2356 dstHasYcbcr,
2357 srcFormat,
2358 srcSampleCnt,
2359 srcImage->isLinearTiled(),
2360 srcHasYcbcr));
2361
2362#endif
2363 if (src->isProtected() && !dst->isProtected()) {
2364 SkDebugf("Can't copy from protected memory to non-protected");
2365 return;
2366 }
2367
2368 dstImage->setImageLayout(this,
2369 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2370 VK_ACCESS_TRANSFER_WRITE_BIT,
2371 VK_PIPELINE_STAGE_TRANSFER_BIT,
2372 false);
2373
2374 srcImage->setImageLayout(this,
2375 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2376 VK_ACCESS_TRANSFER_READ_BIT,
2377 VK_PIPELINE_STAGE_TRANSFER_BIT,
2378 false);
2379
2380 VkImageBlit blitRegion;
2381 memset(&blitRegion, 0, sizeof(VkImageBlit));
2382 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2383 blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
2384 blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
2385 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2386 blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
2387 blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };
2388
2389 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2390 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2391 this->currentCommandBuffer()->blitImage(this,
2392 *srcImage,
2393 *dstImage,
2394 1,
2395 &blitRegion,
2396 filter == GrSamplerState::Filter::kNearest ?
2397 VK_FILTER_NEAREST : VK_FILTER_LINEAR);
2398
2399 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2400 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2401}
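// Note: unlike vkCmdCopyImage, vkCmdBlitImage may scale and convert formats (hence the
// filter argument above), but it is only valid on single-sample images, which is part of
// what canCopyAsBlit() verifies.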
2402
2403void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
2404 const SkIPoint& dstPoint) {
2405 if (src->isProtected() && !dst->isProtected()) {
2406 SkDebugf("Can't copy from protected memory to non-protected");
2407 return;
2408 }
2409 GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
2410 this->resolveImage(dst, srcRT, srcRect, dstPoint);
2411 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2412 srcRect.width(), srcRect.height());
2413 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2414 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2415}
2416
2417bool GrVkGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
2418 GrSurface* src, const SkIRect& srcRect,
2419 GrSamplerState::Filter filter) {
2420#ifdef SK_DEBUG
2421 if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
2422 SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
2423 }
2424 if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
2425 SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
2426 }
2427#endif
2428 if (src->isProtected() && !dst->isProtected()) {
2429 SkDebugf("Can't copy from protected memory to non-protected");
2430 return false;
2431 }
2432
2433 GrVkImage* dstImage;
2434 GrVkImage* srcImage;
2435 GrRenderTarget* dstRT = dst->asRenderTarget();
2436 if (dstRT) {
2437 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
2438 if (vkRT->wrapsSecondaryCommandBuffer()) {
2439 return false;
2440 }
2441 // This will technically return true for single sample rts that used DMSAA in which case we
2442 // don't have to pick the resolve attachment. But in that case the resolve and color
2443 // attachments will be the same anyways.
2444 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2445 dstImage = vkRT->resolveAttachment();
2446 } else {
2447 dstImage = vkRT->colorAttachment();
2448 }
2449 } else if (dst->asTexture()) {
2450 dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage();
2451 } else {
2452 // The surface is already a GrAttachment
2453 dstImage = static_cast<GrVkImage*>(dst);
2454 }
2455 GrRenderTarget* srcRT = src->asRenderTarget();
2456 if (srcRT) {
2457 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
2458 // This will technically return true for single sample rts that used DMSAA in which case we
2459 // don't have to pick the resolve attachment. But in that case the resolve and color
2460 // attachments will be the same anyways.
2461 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2462 srcImage = vkRT->resolveAttachment();
2463 } else {
2464 srcImage = vkRT->colorAttachment();
2465 }
2466 } else if (src->asTexture()) {
2467 SkASSERT(src->asTexture());
2468 srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
2469 } else {
2470 // The surface is already a GrAttachment
2471 srcImage = static_cast<GrVkImage*>(src);
2472 }
2473
2474 VkFormat dstFormat = dstImage->imageFormat();
2475 VkFormat srcFormat = srcImage->imageFormat();
2476
2477 int dstSampleCnt = dstImage->numSamples();
2478 int srcSampleCnt = srcImage->numSamples();
2479
2480 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2481 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2482
2483 if (srcRect.size() == dstRect.size()) {
2484 // Prefer resolves or copy-image commands when there is no scaling
2485 const SkIPoint dstPoint = dstRect.topLeft();
2486 if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
2487 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2488 this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
2489 return true;
2490 }
2491
2492 if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2493 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2494 this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
2495 return true;
2496 }
2497 }
2498
2499 if (this->vkCaps().canCopyAsBlit(dstFormat,
2500 dstSampleCnt,
2501 dstImage->isLinearTiled(),
2502 dstHasYcbcr,
2503 srcFormat,
2504 srcSampleCnt,
2505 srcImage->isLinearTiled(),
2506 srcHasYcbcr)) {
2507 this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstRect, filter);
2508 return true;
2509 }
2510
2511 return false;
2512}
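// Note: the fallbacks above run cheapest-first: a resolve when the source is MSAA and no
// scaling is needed, then a raw vkCmdCopyImage, and finally vkCmdBlitImage, the only path
// that can scale or convert formats.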
2513
2514bool GrVkGpu::onReadPixels(GrSurface* surface,
2515 SkIRect rect,
2516 GrColorType surfaceColorType,
2517 GrColorType dstColorType,
2518 void* buffer,
2519 size_t rowBytes) {
2520 if (surface->isProtected()) {
2521 return false;
2522 }
2523
2524 if (!this->currentCommandBuffer()) {
2525 return false;
2526 }
2527
2528 GrVkImage* image = nullptr;
2529 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
2530 if (rt) {
2531 // Reading from render targets that wrap a secondary command buffer is not allowed, since
2532 // it would require us to know the VkImage, which we don't have, and to stop and restart
2533 // the VkRenderPass, which we don't have access to.
2534 if (rt->wrapsSecondaryCommandBuffer()) {
2535 return false;
2536 }
2537 image = rt->nonMSAAAttachment();
2538 } else {
2539 image = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
2540 }
2541
2542 if (!image) {
2543 return false;
2544 }
2545
2546 if (dstColorType == GrColorType::kUnknown ||
2547 dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
2548 return false;
2549 }
2550
2551 // Change layout of our target so it can be used as a copy source
2552 image->setImageLayout(this,
2553 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2554 VK_ACCESS_TRANSFER_READ_BIT,
2555 VK_PIPELINE_STAGE_TRANSFER_BIT,
2556 false);
2557
2558 size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
2559 if (skgpu::VkFormatBytesPerBlock(image->imageFormat()) != bpp) {
2560 return false;
2561 }
2562 size_t tightRowBytes = bpp*rect.width();
2563
2564 VkBufferImageCopy region;
2565 memset(&region, 0, sizeof(VkBufferImageCopy));
2566 VkOffset3D offset = { rect.left(), rect.top(), 0 };
2567 region.imageOffset = offset;
2568 region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
2569
2570 size_t transBufferRowBytes = bpp * region.imageExtent.width;
2571 size_t imageRows = region.imageExtent.height;
2572 GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
2573 sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
2574 transBufferRowBytes * imageRows,
2575 GrGpuBufferType::kXferGpuToCpu,
2576 kDynamic_GrAccessPattern,
2577 GrResourceProvider::ZeroInit::kNo);
2578
2579 if (!transferBuffer) {
2580 return false;
2581 }
2582
2583 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
2584
2585 // Copy the image to a buffer so we can map it to cpu memory
2586 region.bufferOffset = 0;
2587 region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
2588 region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
2589 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2590
2591 this->currentCommandBuffer()->copyImageToBuffer(this,
2592 image,
2593 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2594 transferBuffer,
2595 1,
2596 &region);
2597
2598 // make sure the copy to buffer has finished
2599 vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
2600 VK_ACCESS_HOST_READ_BIT,
2601 VK_PIPELINE_STAGE_TRANSFER_BIT,
2602 VK_PIPELINE_STAGE_HOST_BIT,
2603 false);
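// Note: this TRANSFER_WRITE -> HOST_READ barrier is what makes the GPU's buffer writes
// visible to host reads; without it, mapping the buffer below would not be guaranteed to
// observe the copied pixels even after the queue has finished.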
2604
2605 // We need to submit the current command buffer to the Queue and make sure it finishes before
2606 // we can copy the data out of the buffer.
2607 if (!this->submitCommandBuffer(kForce_SyncQueue)) {
2608 return false;
2609 }
2610 void* mappedMemory = transferBuffer->map();
2611 if (!mappedMemory) {
2612 return false;
2613 }
2614
2615 SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());
2616
2617 transferBuffer->unmap();
2618 return true;
2619}
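// Note: this readback is fully synchronous; submitCommandBuffer(kForce_SyncQueue) above
// waits for the GPU, so callers that need overlap generally use Skia's asynchronous
// read-pixels/transfer paths instead of this blocking route.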
2620
2621bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
2622 sk_sp<const GrVkFramebuffer> framebuffer,
2623 const VkClearValue* colorClear,
2624 const GrSurface* target,
2625 const SkIRect& renderPassBounds,
2626 bool forSecondaryCB) {
2627 if (!this->currentCommandBuffer()) {
2628 return false;
2629 }
2630 SkASSERT(!framebuffer->isExternal());
2631
2632#ifdef SK_DEBUG
2633 uint32_t index;
2634 bool result = renderPass->colorAttachmentIndex(&index);
2635 SkASSERT(result && 0 == index);
2636 result = renderPass->stencilAttachmentIndex(&index);
2637 if (result) {
2638 SkASSERT(1 == index);
2639 }
2640#endif
2641 VkClearValue clears[3];
2642 int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
2643 clears[0].color = colorClear->color;
2644 clears[stencilIndex].depthStencil.depth = 0.0f;
2645 clears[stencilIndex].depthStencil.stencil = 0;
2646
2647 return this->currentCommandBuffer()->beginRenderPass(
2648 this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
2649}
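// Note: the clear-value array above mirrors the render pass's attachment order: color at
// index 0, stencil at index 1, or at index 2 when a resolve attachment sits between them,
// which is why stencilIndex depends on hasResolveAttachment().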
2650
2651void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
2652 const SkIRect& bounds) {
2653 // We had a command buffer when we started the render pass, so we should have one now as well.
2654 SkASSERT(this->currentCommandBuffer());
2655 this->currentCommandBuffer()->endRenderPass(this);
2656 this->didWriteToSurface(target, origin, &bounds);
2657}
2658
2659bool GrVkGpu::checkVkResult(VkResult result) {
2660 switch (result) {
2661 case VK_SUCCESS:
2662 return true;
2663 case VK_ERROR_DEVICE_LOST:
2664 if (!fDeviceIsLost) {
2665 // Callback should only be invoked once, and device should be marked as lost first.
2666 fDeviceIsLost = true;
2667 skgpu::InvokeDeviceLostCallback(vkInterface(),
2668 device(),
2669 fDeviceLostContext,
2670 fDeviceLostProc,
2671 vkCaps().supportsDeviceFaultInfo());
2672 }
2673 return false;
2674 case VK_ERROR_OUT_OF_DEVICE_MEMORY:
2675 case VK_ERROR_OUT_OF_HOST_MEMORY:
2676 this->setOOMed();
2677 return false;
2678 default:
2679 return false;
2680 }
2681}
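// Note: VK_ERROR_DEVICE_LOST is sticky; once the device is lost, further submissions keep
// failing, so the callback is invoked exactly once and fDeviceIsLost gates any retry.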
2682
2683void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
2684 if (!this->currentCommandBuffer()) {
2685 return;
2686 }
2687 this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
2688}
2689
2690void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
2691 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
2692
2693 fCachedOpsRenderPass->submit();
2694 fCachedOpsRenderPass->reset();
2695}
2696
2697[[nodiscard]] std::unique_ptr<GrSemaphore> GrVkGpu::makeSemaphore(bool isOwned) {
2698 return GrVkSemaphore::Make(this, isOwned);
2699}
2700
2701std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
2702 GrSemaphoreWrapType wrapType,
2703 GrWrapOwnership ownership) {
2704 return GrVkSemaphore::MakeWrapped(this, GrBackendSemaphores::GetVkSemaphore(semaphore),
2705 wrapType, ownership);
2706}
2707
2708void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
2709 SkASSERT(semaphore);
2710
2711 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2712
2713 GrVkSemaphore::Resource* resource = vkSem->getResource();
2714 if (resource->shouldSignal()) {
2715 resource->ref();
2716 fSemaphoresToSignal.push_back(resource);
2717 }
2718}
2719
2720void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
2721 SkASSERT(semaphore);
2722
2723 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2724
2725 GrVkSemaphore::Resource* resource = vkSem->getResource();
2726 if (resource->shouldWait()) {
2727 resource->ref();
2728 fSemaphoresToWaitOn.push_back(resource);
2729 }
2730}
2731
2732std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
2733 SkASSERT(texture);
2734 GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
2735 vkTexture->setImageLayout(this,
2736 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
2737 VK_ACCESS_SHADER_READ_BIT,
2738 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2739 false);
2740 // TODO: should we have a way to notify the caller that this has failed? Currently if the submit
2741 // fails (caused by DEVICE_LOST) this will just cause us to fail the next use of the gpu.
2742 // Eventually we will abandon the whole GPU if this fails.
2743 this->submitToGpu(GrSyncCpu::kNo);
2744
2745 // The image layout change serves as a barrier, so no semaphore is needed.
2746 // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
2747 // thread safe so that only the first thread that tries to use the semaphore actually submits
2748 // it. This additionally would also require thread safety in command buffer submissions to
2749 // queues in general.
2750 return nullptr;
2751}
2752
2753void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
2754 fDrawables.emplace_back(std::move(drawable));
2755}
2756
2757void GrVkGpu::storeVkPipelineCacheData() {
2758 if (this->getContext()->priv().getPersistentCache()) {
2759 this->resourceProvider().storePipelineCacheData();
2760 }
2761}