GrVkGpu.cpp
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkGpu.h"

#include "src/core/SkMipmap.h"
#include "src/gpu/DataUtils.h"
#include "src/gpu/ganesh/SkGr.h"

using namespace skia_private;

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)

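// For example, VK_CALL(GetPhysicalDeviceProperties(...)) in the constructor below resolves to
// GR_VK_CALL(this->vkInterface(), GetPhysicalDeviceProperties(...)), so every Vulkan entry point
// is invoked through the dynamically loaded interface table rather than a statically linked
// symbol.
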
std::unique_ptr<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
                                     const GrContextOptions& options,
                                     GrDirectContext* direct) {
    if (backendContext.fInstance == VK_NULL_HANDLE ||
        backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
        backendContext.fDevice == VK_NULL_HANDLE ||
        backendContext.fQueue == VK_NULL_HANDLE) {
        return nullptr;
    }
    if (!backendContext.fGetProc) {
        return nullptr;
    }

    PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
            reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
                    backendContext.fGetProc("vkEnumerateInstanceVersion",
                                            VK_NULL_HANDLE, VK_NULL_HANDLE));
    uint32_t instanceVersion = 0;
    if (!localEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        VkResult err = localEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
            return nullptr;
        }
    }

    PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
            reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
                    backendContext.fGetProc("vkGetPhysicalDeviceProperties",
                                            backendContext.fInstance,
                                            VK_NULL_HANDLE));

    if (!localGetPhysicalDeviceProperties) {
        return nullptr;
    }
    VkPhysicalDeviceProperties physDeviceProperties;
    localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
    uint32_t physDevVersion = physDeviceProperties.apiVersion;

    uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
                                                        : instanceVersion;

    instanceVersion = std::min(instanceVersion, apiVersion);
    physDevVersion = std::min(physDevVersion, apiVersion);

    sk_sp<const skgpu::VulkanInterface> interface;

    if (backendContext.fVkExtensions) {
        interface.reset(new skgpu::VulkanInterface(backendContext.fGetProc,
                                                   backendContext.fInstance,
                                                   backendContext.fDevice,
                                                   instanceVersion,
                                                   physDevVersion,
                                                   backendContext.fVkExtensions));
        if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
            return nullptr;
        }
    } else {
        skgpu::VulkanExtensions extensions;
        // The only extension flag that may affect the vulkan backend is the swapchain extension.
        // We need to know if this is enabled to know if we can transition to a present layout
        // when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        interface.reset(new skgpu::VulkanInterface(backendContext.fGetProc,
                                                   backendContext.fInstance,
                                                   backendContext.fDevice,
                                                   instanceVersion,
                                                   physDevVersion,
                                                   &extensions));
        if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
            return nullptr;
        }
    }

    sk_sp<GrVkCaps> caps;
    if (backendContext.fDeviceFeatures2) {
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                *backendContext.fDeviceFeatures2, instanceVersion, physDevVersion,
                                *backendContext.fVkExtensions, backendContext.fProtectedContext));
    } else if (backendContext.fDeviceFeatures) {
        VkPhysicalDeviceFeatures2 features2;
        features2.pNext = nullptr;
        features2.features = *backendContext.fDeviceFeatures;
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                features2, instanceVersion, physDevVersion,
                                *backendContext.fVkExtensions, backendContext.fProtectedContext));
    } else {
        VkPhysicalDeviceFeatures2 features;
        memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
        features.pNext = nullptr;
        if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
            features.features.geometryShader = true;
        }
        if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
            features.features.dualSrcBlend = true;
        }
        if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
            features.features.sampleRateShading = true;
        }
        skgpu::VulkanExtensions extensions;
        // The only extension flag that may affect the vulkan backend is the swapchain extension.
        // We need to know if this is enabled to know if we can transition to a present layout
        // when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                features, instanceVersion, physDevVersion, extensions,
                                backendContext.fProtectedContext));
    }

    if (!caps) {
        return nullptr;
    }

    sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
    if (!memoryAllocator) {
        // We were not given a memory allocator at creation
        memoryAllocator = skgpu::VulkanAMDMemoryAllocator::Make(backendContext.fInstance,
                                                                backendContext.fPhysicalDevice,
                                                                backendContext.fDevice,
                                                                physDevVersion,
                                                                backendContext.fVkExtensions,
                                                                interface.get(),
                                                                /*threadSafe=*/false);
    }
    if (!memoryAllocator) {
        SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
        return nullptr;
    }

    std::unique_ptr<GrVkGpu> vkGpu(new GrVkGpu(direct,
                                               backendContext,
                                               std::move(caps),
                                               interface,
                                               instanceVersion,
                                               physDevVersion,
                                               std::move(memoryAllocator)));
    if (backendContext.fProtectedContext == GrProtected::kYes &&
        !vkGpu->vkCaps().supportsProtectedContent()) {
        return nullptr;
    }
    return vkGpu;
}
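// A minimal sketch (not part of this file) of the fGetProc hook that Make() consumes above.
// The lambda is an assumption about typical client code, not Skia API; it simply forwards to
// the standard Vulkan loader entry points:
//
//   backendContext.fGetProc = [](const char* name, VkInstance instance, VkDevice device) {
//       return device != VK_NULL_HANDLE ? vkGetDeviceProcAddr(device, name)
//                                       : vkGetInstanceProcAddr(instance, name);
//   };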

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrDirectContext* direct,
                 const GrVkBackendContext& backendContext,
                 sk_sp<GrVkCaps> caps,
                 sk_sp<const skgpu::VulkanInterface> interface,
                 uint32_t instanceVersion,
                 uint32_t physicalDeviceVersion,
                 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator)
        : INHERITED(direct)
        , fInterface(std::move(interface))
        , fMemoryAllocator(std::move(memoryAllocator))
        , fVkCaps(std::move(caps))
        , fPhysicalDevice(backendContext.fPhysicalDevice)
        , fDevice(backendContext.fDevice)
        , fQueue(backendContext.fQueue)
        , fQueueIndex(backendContext.fGraphicsQueueIndex)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fDisconnected(false)
        , fProtectedContext(backendContext.fProtectedContext)
        , fDeviceLostContext(backendContext.fDeviceLostContext)
        , fDeviceLostProc(backendContext.fDeviceLostProc) {
    SkASSERT(!backendContext.fOwnsInstanceAndDevice);
    SkASSERT(fMemoryAllocator);

    this->initCaps(fVkCaps);

    VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
    VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));

    fResourceProvider.init();

    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(fMainCmdBuffer);
        this->currentCommandBuffer()->begin(this);
    }
}

void GrVkGpu::destroyResources() {
    if (fMainCmdPool) {
        fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
        fMainCmdPool->close();
    }

    // wait for all commands to finish
    VK_CALL(QueueWaitIdle(fQueue));

    if (fMainCmdPool) {
        fMainCmdPool->unref();
        fMainCmdPool = nullptr;
    }

    for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
        fSemaphoresToWaitOn[i]->unref();
    }
    fSemaphoresToWaitOn.clear();

    for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.clear();

    fStagingBufferManager.reset();

    fMSAALoadManager.destroyResources(this);

    // must call this just before we destroy the command pool and VkDevice
    fResourceProvider.destroyResources();
}

GrVkGpu::~GrVkGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
    // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
    // clients can continue to delete backend textures even after a context has been abandoned.
    fMemoryAllocator.reset();
}

void GrVkGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (!fDisconnected) {
        this->destroyResources();

        fSemaphoresToWaitOn.clear();
        fSemaphoresToSignal.clear();
        fMainCmdBuffer = nullptr;
        fDisconnected = true;
    }
}

GrThreadSafePipelineBuilder* GrVkGpu::pipelineBuilder() {
    return fResourceProvider.pipelineStateCache();
}

sk_sp<GrThreadSafePipelineBuilder> GrVkGpu::refPipelineBuilder() {
    return fResourceProvider.refPipelineStateCache();
}

///////////////////////////////////////////////////////////////////////////////

GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
        GrRenderTarget* rt,
        bool useMSAASurface,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const TArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    if (!fCachedOpsRenderPass) {
        fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
    }

    // For the given render target and requested render pass features we need to find a compatible
    // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
    // is compatible, but that is part of the framebuffer that we get here.
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);

    SkASSERT(!useMSAASurface ||
             rt->numSamples() > 1 ||
             (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
              vkRT->resolveAttachment() &&
              vkRT->resolveAttachment()->supportedUsages() &
                      GrAttachment::UsageFlags::kColorAttachment));

    // Convert the GrXferBarrierFlags into render pass self dependency flags
    GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
    if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
    }
    if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
    }

    // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
    // needed if we are using msaa to draw with a discardable msaa attachment. If we are in this
    // case we also need to update the color load/store ops since we don't want to ever load or
    // store the msaa color attachment, but may need to for the resolve attachment.
    GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
    bool withResolve = false;
    GrOpsRenderPass::LoadAndStoreInfo resolveInfo{GrLoadOp::kLoad, GrStoreOp::kStore, {}};
    GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
    if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
        withResolve = true;
        localColorInfo.fStoreOp = GrStoreOp::kDiscard;
        if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
            loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
            localColorInfo.fLoadOp = GrLoadOp::kDiscard;
        } else {
            resolveInfo.fLoadOp = GrLoadOp::kDiscard;
        }
    }

    // Get the framebuffer to use for the render pass
    sk_sp<GrVkFramebuffer> framebuffer;
    if (vkRT->wrapsSecondaryCommandBuffer()) {
        framebuffer = vkRT->externalFramebuffer();
    } else {
        auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
                                       loadFromResolve);
        framebuffer = sk_ref_sp(fb);
    }
    if (!framebuffer) {
        return nullptr;
    }

    if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
                                   stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
                                   sampledProxies)) {
        return nullptr;
    }
    return fCachedOpsRenderPass.get();
}

bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());

    if (!this->currentCommandBuffer()->hasWork() && kForce_SyncQueue != sync &&
        fSemaphoresToSignal.empty() && fSemaphoresToWaitOn.empty()) {
        // We may have added finished procs during the flush call. Since there is no actual work
        // we are not submitting the command buffer and may never come back around to submit it.
        // Thus we call all current finished procs manually, since the work has technically
        // finished.
        this->currentCommandBuffer()->callFinishedProcs();
        SkASSERT(fDrawables.empty());
        fResourceProvider.checkCommandBuffers();
        return true;
    }

    fMainCmdBuffer->end(this);
    SkASSERT(fMainCmdPool);
    fMainCmdPool->close();
    bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
                                                   fSemaphoresToWaitOn);

    if (didSubmit && sync == kForce_SyncQueue) {
        fMainCmdBuffer->forceSync(this);
    }

    // We must delete any drawables that had to wait until submit to destroy.
    fDrawables.clear();

    // If we didn't submit the command buffer then we did not wait on any semaphores. We will
    // continue to hold onto these semaphores and wait on them during the next command buffer
    // submission.
    if (didSubmit) {
        for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
            fSemaphoresToWaitOn[i]->unref();
        }
        fSemaphoresToWaitOn.clear();
    }

    // Even if we did not submit the command buffer, we drop all the signal semaphores since we
    // will not try to recover the work that wasn't submitted and instead just drop it all. The
    // client will be notified that the semaphores were not submitted so that they will not try
    // to wait on them.
    for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.clear();

    // Release old command pool and create a new one
    fMainCmdPool->unref();
    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(fMainCmdBuffer);
        fMainCmdBuffer->begin(this);
    } else {
        fMainCmdBuffer = nullptr;
    }
    // We must wait to call checkCommandBuffers until after we get a new command buffer. The
    // checkCommandBuffers may trigger a releaseProc which may cause us to insert a barrier for a
    // released GrVkImage. That barrier needs to be put into a new command buffer and not the old
    // one that was just submitted.
    fResourceProvider.checkCommandBuffers();
    return didSubmit;
}

///////////////////////////////////////////////////////////////////////////////
sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size,
                                           GrGpuBufferType type,
                                           GrAccessPattern accessPattern) {
#ifdef SK_DEBUG
    switch (type) {
        case GrGpuBufferType::kVertex:
        case GrGpuBufferType::kIndex:
        case GrGpuBufferType::kDrawIndirect:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStatic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStream_GrAccessPattern);
            break;
        case GrGpuBufferType::kUniform:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
    }
#endif
    return GrVkBuffer::Make(this, size, type, accessPattern);
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType srcColorType,
                            const GrMipLevel texels[],
                            int mipLevelCount,
                            bool prepForTexSampling) {
    GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
    if (!texture) {
        return false;
    }
    GrVkImage* texImage = texture->textureImage();

    // Make sure we have at least the base level
    if (!mipLevelCount || !texels[0].fPixels) {
        return false;
    }

    SkASSERT(!GrVkFormatIsCompressed(texImage->imageFormat()));
    bool success = false;
    bool linearTiling = texImage->isLinearTiled();
    if (linearTiling) {
        if (mipLevelCount > 1) {
            SkDebugf("Can't upload mipmap data to linear tiled texture");
            return false;
        }
        if (VK_IMAGE_LAYOUT_PREINITIALIZED != texImage->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            texImage->setImageLayout(this,
                                     VK_IMAGE_LAYOUT_GENERAL,
                                     VK_ACCESS_HOST_WRITE_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);
            if (!this->submitCommandBuffer(kForce_SyncQueue)) {
                return false;
            }
        }
        success = this->uploadTexDataLinear(texImage,
                                            rect,
                                            srcColorType,
                                            texels[0].fPixels,
                                            texels[0].fRowBytes);
    } else {
        SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
        success = this->uploadTexDataOptimal(texImage,
                                             rect,
                                             srcColorType,
                                             texels,
                                             mipLevelCount);
        if (1 == mipLevelCount) {
            texture->markMipmapsDirty();
        }
    }

    if (prepForTexSampling) {
        texImage->setImageLayout(this,
                                 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                 VK_ACCESS_SHADER_READ_BIT,
                                 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                 false);
    }

    return success;
}

// When we update vertex/index buffers via transfers we assume that they may have been used
// previously in draws and will be used again in draws afterwards. So we put a barrier before and
// after. If we had a mechanism for gathering the buffers that will be used in a GrVkOpsRenderPass
// *before* we begin a subpass we could do this lazily and non-redundantly by tracking the "last
// usage" on the GrVkBuffer. Then Pass 1 draw, xfer, xfer, xfer, Pass 2 draw would insert just two
// barriers: one before the first xfer and one before Pass 2. Currently, we'd use six barriers.
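// (In the current scheme each of the three transfers is bracketed by its own before/after pair:
// 3 transfers x 2 barriers = 6.)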
// Pass false as "after" before the transfer and true after the transfer.
static void add_transfer_dst_buffer_mem_barrier(GrVkGpu* gpu,
                                                GrVkBuffer* dst,
                                                size_t offset,
                                                size_t size,
                                                bool after) {
    if (dst->intendedType() != GrGpuBufferType::kIndex &&
        dst->intendedType() != GrGpuBufferType::kVertex) {
        return;
    }

    VkAccessFlags srcAccessMask = dst->intendedType() == GrGpuBufferType::kIndex
                                          ? VK_ACCESS_INDEX_READ_BIT
                                          : VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    VkPipelineStageFlags srcPipelineStageFlags = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
    VkPipelineStageFlags dstPipelineStageFlags = VK_PIPELINE_STAGE_TRANSFER_BIT;

    if (after) {
        using std::swap;
        swap(srcAccessMask, dstAccessMask);
        swap(srcPipelineStageFlags, dstPipelineStageFlags);
    }

    VkBufferMemoryBarrier bufferMemoryBarrier = {
            VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
            nullptr,                                  // pNext
            srcAccessMask,                            // srcAccessMask
            dstAccessMask,                            // dstAccessMask
            VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
            dst->vkBuffer(),                          // buffer
            offset,                                   // offset
            size,                                     // size
    };

    gpu->addBufferMemoryBarrier(srcPipelineStageFlags,
                                dstPipelineStageFlags,
                                /*byRegion=*/false,
                                &bufferMemoryBarrier);
}

bool GrVkGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                           size_t srcOffset,
                                           sk_sp<GrGpuBuffer> dst,
                                           size_t dstOffset,
                                           size_t size) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    VkBufferCopy copyRegion;
    copyRegion.srcOffset = srcOffset;
    copyRegion.dstOffset = dstOffset;
    copyRegion.size = size;

    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(dst.get()),
                                        dstOffset,
                                        size,
                                        /*after=*/false);
    this->currentCommandBuffer()->copyBuffer(this, std::move(src), dst, 1, &copyRegion);
    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(dst.get()),
                                        dstOffset,
                                        size,
                                        /*after=*/true);

    return true;
}

bool GrVkGpu::onTransferPixelsTo(GrTexture* texture,
                                 SkIRect rect,
                                 GrColorType surfaceColorType,
                                 GrColorType bufferColorType,
                                 sk_sp<GrGpuBuffer> transferBuffer,
                                 size_t bufferOffset,
                                 size_t rowBytes) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
        return false;
    }

    // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
    if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
        return false;
    }
    GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
    if (!tex) {
        return false;
    }
    GrVkImage* vkImage = tex->textureImage();
    VkFormat format = vkImage->imageFormat();

    // Can't transfer compressed data
    SkASSERT(!skgpu::VkFormatIsCompressed(format));

    if (!transferBuffer) {
        return false;
    }

    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(skgpu::VkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));

    SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
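    // Note that bufferRowLength below is specified in texels rather than bytes, which is why
    // rowBytes is divided by bpp.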
    region.bufferRowLength = (uint32_t)(rowBytes/bpp);
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { rect.left(), rect.top(), 0 };
    region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };

    // Change layout of our target so it can be copied to
    vkImage->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT,
                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                            false);

    const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());

    // Copy the buffer to the image.
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    vkImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    1,
                                                    &region);
    this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));

    tex->markMipmapsDirty();
    return true;
}

bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface,
                                   SkIRect rect,
                                   GrColorType surfaceColorType,
                                   GrColorType bufferColorType,
                                   sk_sp<GrGpuBuffer> transferBuffer,
                                   size_t offset) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    if (fProtectedContext == GrProtected::kYes) {
        return false;
    }

    GrVkImage* srcImage;
    if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, as well as need us to
        // stop and start the VkRenderPass which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        if (!rt->nonMSAAAttachment()) {
            return false;
        }
        srcImage = rt->nonMSAAAttachment();
    } else {
        SkASSERT(surface->asTexture());
        srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
    }

    VkFormat format = srcImage->imageFormat();
    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(skgpu::VkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = offset;
    region.bufferRowLength = rect.width();
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = {rect.left(), rect.top(), 0};
    region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer, 1, &region);

    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
    // Make sure the copy to buffer has finished.
    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);
    return true;
}

void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                           const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

    SkASSERT(dst);
    SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);

    VkImageResolve resolveInfo;
    resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
    resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
    resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};

    GrVkImage* dstImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    GrTexture* dstTex = dst->asTexture();
    if (dstTex) {
        dstImage = static_cast<GrVkTexture*>(dstTex)->textureImage();
    } else {
        SkASSERT(dst->asRenderTarget());
        dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
    }
    SkASSERT(dstImage);

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    src->colorAttachment()->setImageLayout(this,
                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                           VK_ACCESS_TRANSFER_READ_BIT,
                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                           false);
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src->colorAttachment()));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->resolveImage(this, *src->colorAttachment(), *dstImage, 1,
                                               &resolveInfo);
}

void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target->numSamples() > 1);
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
    SkASSERT(rt->nonMSAAAttachment());

    if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
        // We would have resolved the RT during the render pass.
        return;
    }

    this->resolveImage(target, rt, resolveRect,
                       SkIPoint::Make(resolveRect.x(), resolveRect.y()));
}

bool GrVkGpu::uploadTexDataLinear(GrVkImage* texImage,
                                  SkIRect rect,
                                  GrColorType dataColorType,
                                  const void* data,
                                  size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(texImage->isLinearTiled());

    SkASSERT(SkIRect::MakeSize(texImage->dimensions()).contains(rect));

    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
    size_t trimRowBytes = rect.width() * bpp;

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texImage->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == texImage->currentLayout());
    const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
    };
    VkSubresourceLayout layout;

    const skgpu::VulkanInterface* interface = this->vkInterface();

    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    texImage->image(),
                                                    &subres,
                                                    &layout));

    const skgpu::VulkanAlloc& alloc = texImage->alloc();
    if (VK_NULL_HANDLE == alloc.fMemory) {
        return false;
    }
    VkDeviceSize offset = rect.top()*layout.rowPitch + rect.left()*bpp;
    VkDeviceSize size = rect.height()*layout.rowPitch;
    SkASSERT(size + offset <= alloc.fSize);
    auto checkResult = [this](VkResult result) {
        return this->checkVkResult(result);
    };
    auto allocator = this->memoryAllocator();
    void* mapPtr = skgpu::VulkanMemory::MapAlloc(allocator, alloc, checkResult);
    if (!mapPtr) {
        return false;
    }
    mapPtr = reinterpret_cast<char*>(mapPtr) + offset;

    SkRectMemcpy(mapPtr,
                 static_cast<size_t>(layout.rowPitch),
                 data,
                 rowBytes,
                 trimRowBytes,
                 rect.height());

    skgpu::VulkanMemory::FlushMappedAlloc(allocator, alloc, offset, size, checkResult);
    skgpu::VulkanMemory::UnmapAlloc(allocator, alloc);

    return true;
}

// This fills in the 'regions' vector in preparation for copying a buffer to an image.
// 'individualMipOffsets' is filled in as a side-effect.
static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
                                         TArray<VkBufferImageCopy>* regions,
                                         TArray<size_t>* individualMipOffsets,
                                         GrStagingBufferManager::Slice* slice,
                                         SkTextureCompressionType compression,
                                         VkFormat vkFormat,
                                         SkISize dimensions,
                                         skgpu::Mipmapped mipmapped) {
    SkASSERT(compression != SkTextureCompressionType::kNone);
    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    regions->reserve_exact(regions->size() + numMipLevels);
    individualMipOffsets->reserve_exact(individualMipOffsets->size() + numMipLevels);

    size_t bytesPerBlock = skgpu::VkFormatBytesPerBlock(vkFormat);

    size_t bufferSize = SkCompressedDataSize(
            compression, dimensions, individualMipOffsets, mipmapped == skgpu::Mipmapped::kYes);
    SkASSERT(individualMipOffsets->size() == numMipLevels);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to multiple of the texel size and 4
    size_t alignment = bytesPerBlock;
    switch (alignment & 0b11) {
        case 0: break;                   // alignment is already a multiple of 4.
        case 2: alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
        default: alignment *= 4; break;  // alignment is not a multiple of 2.
    }
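    // For example: bytesPerBlock 8 stays 8, bytesPerBlock 2 becomes 4, and an odd size such as
    // 3 becomes 12 -- the smallest value that is a multiple of both the texel size and 4.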
    *slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
    if (!slice->fBuffer) {
        return 0;
    }

    for (int i = 0; i < numMipLevels; ++i) {
        VkBufferImageCopy& region = regions->push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
        SkISize revisedDimensions = skgpu::CompressedDimensions(compression, dimensions);
        region.bufferRowLength = revisedDimensions.width();
        region.bufferImageHeight = revisedDimensions.height();
        region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
        region.imageOffset = {0, 0, 0};
        region.imageExtent = {SkToU32(dimensions.width()),
                              SkToU32(dimensions.height()), 1};

        dimensions = {std::max(1, dimensions.width() /2),
                      std::max(1, dimensions.height()/2)};
    }

    return bufferSize;
}

bool GrVkGpu::uploadTexDataOptimal(GrVkImage* texImage,
                                   SkIRect rect,
                                   GrColorType dataColorType,
                                   const GrMipLevel texels[],
                                   int mipLevelCount) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    SkASSERT(!texImage->isLinearTiled());
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(texImage->dimensions()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(mipLevelCount == 1 || mipLevelCount == (int)texImage->mipLevels());

    SkASSERT(!rect.isEmpty());

    SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texImage));

    SkASSERT(this->vkCaps().isVkFormatTexturable(texImage->imageFormat()));
    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    // texels is const.
    // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
    // Because of this we need to make a non-const shallow copy of texels.
    AutoTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
    std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());

    TArray<size_t> individualMipOffsets;
    size_t combinedBufferSize;
    if (mipLevelCount > 1) {
        combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
                                                              rect.size(),
                                                              &individualMipOffsets,
                                                              mipLevelCount);
    } else {
        SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
        combinedBufferSize = rect.width()*rect.height()*bpp;
        individualMipOffsets.push_back(0);
    }
    SkASSERT(combinedBufferSize);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to multiple of the texel size and 4
    size_t alignment = bpp;
    switch (alignment & 0b11) {
        case 0: break;                   // alignment is already a multiple of 4.
        case 2: alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
        default: alignment *= 4; break;  // alignment is not a multiple of 2.
    }
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }

    int uploadLeft = rect.left();
    int uploadTop = rect.top();

    char* buffer = (char*) slice.fOffsetMapPtr;
    TArray<VkBufferImageCopy> regions(mipLevelCount);

    int currentWidth = rect.width();
    int currentHeight = rect.height();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping the trailing bytes
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
            region.imageOffset = {uploadLeft, uploadTop, 0};
            region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
        }

        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }

    // Change layout of our target so it can be copied to
    texImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason being is that
    // the buffer is coming from the staging manager and the staging manager will make sure the
    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
    // every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    texImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.size(),
                                                    regions.begin());
    return true;
}

// It's probably possible to roll this into uploadTexDataOptimal,
// but for now it's easier to maintain as a separate entity.
bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
                                      SkTextureCompressionType compression,
                                      VkFormat vkFormat,
                                      SkISize dimensions,
                                      skgpu::Mipmapped mipmapped,
                                      const void* data,
                                      size_t dataSize) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(data);
    SkASSERT(!uploadTexture->isLinearTiled());
    // For now the assumption is that our rect is the entire texture.
    // Compressed textures are read-only so this should be a reasonable assumption.
    SkASSERT(dimensions.fWidth == uploadTexture->width() &&
             dimensions.fHeight == uploadTexture->height());

    if (dimensions.fWidth == 0 || dimensions.fHeight == 0) {
        return false;
    }

    SkASSERT(uploadTexture->imageFormat() == vkFormat);
    SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));

    GrStagingBufferManager::Slice slice;
    TArray<VkBufferImageCopy> regions;
    TArray<size_t> individualMipOffsets;
    SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
                                                                        &regions,
                                                                        &individualMipOffsets,
                                                                        &slice,
                                                                        compression,
                                                                        vkFormat,
                                                                        dimensions,
                                                                        mipmapped);
    if (!slice.fBuffer) {
        return false;
    }
    SkASSERT(dataSize == combinedBufferSize);

    {
        char* buffer = (char*)slice.fOffsetMapPtr;
        memcpy(buffer, data, dataSize);
    }

    // Change layout of our target so it can be copied to
    uploadTexture->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason being is that
    // the buffer is coming from the staging manager and the staging manager will make sure the
    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
    // every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    uploadTexture,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.size(),
                                                    regions.begin());

    return true;
}

////////////////////////////////////////////////////////////////////////////////
// TODO: make this take a skgpu::Mipmapped
sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
                                          const GrBackendFormat& format,
                                          GrRenderable renderable,
                                          int renderTargetSampleCnt,
                                          skgpu::Budgeted budgeted,
                                          GrProtected isProtected,
                                          int mipLevelCount,
                                          uint32_t levelClearMask,
                                          std::string_view label) {
    VkFormat pixelFormat;
    SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
    SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat));
    SkASSERT(mipLevelCount > 0);

    GrMipmapStatus mipmapStatus =
            mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;

    sk_sp<GrVkTexture> tex;
    if (renderable == GrRenderable::kYes) {
        tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
                mipmapStatus, isProtected, label);
    } else {
        tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
                                          mipLevelCount, isProtected, mipmapStatus, label);
    }

    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        if (!this->currentCommandBuffer()) {
            return nullptr;
        }
        STArray<1, VkImageSubresourceRange> ranges;
        bool inRange = false;
        GrVkImage* texImage = tex->textureImage();
        for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
            if (levelClearMask & (1U << i)) {
                if (inRange) {
                    ranges.back().levelCount++;
                } else {
                    auto& range = ranges.push_back();
                    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
                    range.baseArrayLayer = 0;
                    range.baseMipLevel = i;
                    range.layerCount = 1;
                    range.levelCount = 1;
                    inRange = true;
                }
            } else if (inRange) {
                inRange = false;
            }
        }
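        // e.g. levelClearMask 0b0111 yields a single range with baseMipLevel 0 and levelCount 3,
        // while 0b0101 yields two one-level ranges.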
        SkASSERT(!ranges.empty());
        static constexpr VkClearColorValue kZeroClearColor = {};
        texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                 false);
        this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
                                                      ranges.size(), ranges.begin());
    }
    return tex;
}

sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
                                                    const GrBackendFormat& format,
                                                    skgpu::Budgeted budgeted,
                                                    skgpu::Mipmapped mipmapped,
                                                    GrProtected isProtected,
                                                    const void* data,
                                                    size_t dataSize) {
    VkFormat pixelFormat;
    SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
    SkASSERT(skgpu::VkFormatIsCompressed(pixelFormat));

    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
    }

    GrMipmapStatus mipmapStatus = (mipmapped == skgpu::Mipmapped::kYes)
                                          ? GrMipmapStatus::kValid
                                          : GrMipmapStatus::kNotAllocated;

    auto tex = GrVkTexture::MakeNewTexture(this,
                                           budgeted,
                                           dimensions,
                                           pixelFormat,
                                           numMipLevels,
                                           isProtected,
                                           mipmapStatus,
                                           /*label=*/"VkGpu_CreateCompressedTexture");
    if (!tex) {
        return nullptr;
    }

    SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
    if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
                                       dimensions, mipmapped, data, dataSize)) {
        return nullptr;
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
                           size_t offset, size_t size) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(buffer.get()),
                                        offset,
                                        size,
                                        /*after=*/false);
    this->currentCommandBuffer()->updateBuffer(this, buffer, offset, size, src);
    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(buffer.get()),
                                        offset,
                                        size,
                                        /*after=*/true);

    return true;
}

bool GrVkGpu::zeroBuffer(sk_sp<GrGpuBuffer> buffer) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(buffer.get()),
                                        /*offset=*/0,
                                        buffer->size(),
                                        /*after=*/false);
    this->currentCommandBuffer()->fillBuffer(this,
                                             buffer,
                                             /*offset=*/0,
                                             buffer->size(),
                                             /*data=*/0);
    add_transfer_dst_buffer_mem_barrier(this,
                                        static_cast<GrVkBuffer*>(buffer.get()),
                                        /*offset=*/0,
                                        buffer->size(),
                                        /*after=*/true);

    return true;
}

////////////////////////////////////////////////////////////////////////////////

static bool check_image_info(const GrVkCaps& caps,
                             const GrVkImageInfo& info,
                             bool needsAllocation,
                             uint32_t graphicsQueueIndex) {
    if (VK_NULL_HANDLE == info.fImage) {
        return false;
    }

    if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
        return false;
    }

    if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
        return false;
    }

    if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (info.fCurrentQueueFamily != graphicsQueueIndex) {
                return false;
            }
        } else {
            return false;
        }
    }

    if (info.fYcbcrConversionInfo.isValid()) {
        if (!caps.supportsYcbcrConversion()) {
            return false;
        }
        if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
            return true;
        }
    }

    // We currently require everything to be made with transfer bits set
    if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
        !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
        return false;
    }

    return true;
}

static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
    // We don't support directly importing multisampled textures for sampling from shaders.
    if (info.fSampleCount != 1) {
        return false;
    }

    if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
        return true;
    }
    if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
        if (!caps.isVkFormatTexturable(info.fFormat)) {
            return false;
        }
    } else if (info.fImageTiling == VK_IMAGE_TILING_LINEAR) {
        if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
            return false;
        }
    } else if (info.fImageTiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
        if (!caps.supportsDRMFormatModifiers()) {
            return false;
        }
        // To be technically correct we should query the vulkan support for VkFormat and
        // drmFormatModifier pairs to confirm the required feature support is there. However, we
        // currently don't have our caps and format tables set up to do this efficiently. So
        // instead we just rely on the client's passed in VkImageUsageFlags and assume they were
        // set up using valid features (checked below). In practice this should all be safe
        // because currently we are setting all drm format modifier textures to have a
        // GrTextureType::kExternal so we just really need to be able to read these video VkImage
        // in a shader. The video decoder isn't going to give us VkImages that don't support being
        // sampled.
    } else {
        SkUNREACHABLE;
    }

    // We currently require all textures to be made with sample support
    if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
        return false;
    }

    return true;
}

static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) {
    if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
        return false;
    }
    if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
        return false;
    }
    return true;
}

sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                               GrWrapOwnership ownership,
                                               GrWrapCacheable cacheable,
                                               GrIOType ioType) {
    GrVkImageInfo imageInfo;
    if (!GrBackendTextures::GetVkImageInfo(backendTex, &imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
                          this->queueIndex())) {
        return nullptr;
    }

    if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }

    if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
    SkASSERT(mutableState);
    return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
                                           ioType, imageInfo, std::move(mutableState));
}

sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
}

sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                         int sampleCnt,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    GrVkImageInfo imageInfo;
    if (!GrBackendTextures::GetVkImageInfo(backendTex, &imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
                          this->queueIndex())) {
        return nullptr;
    }

    if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }
    // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
    // the wrapped VkImage.
    bool resolveOnly = sampleCnt > 1;
    if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
        return nullptr;
    }

    if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);

    sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
    SkASSERT(mutableState);

    return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
                                                                   sampleCnt, ownership, cacheable,
                                                                   imageInfo,
                                                                   std::move(mutableState));
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    GrVkImageInfo info;
    if (!GrBackendRenderTargets::GetVkImageInfo(backendRT, &info)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
        return nullptr;
    }

    // We will always render directly to this VkImage.
    static bool kResolveOnly = false;
    if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
        return nullptr;
    }

    if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sk_sp<skgpu::MutableTextureState> mutableState = backendRT.getMutableState();
    SkASSERT(mutableState);

    sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
            this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));

    // We don't allow the client to supply a premade stencil buffer. We always create one if
    // needed.
    SkASSERT(!backendRT.stencilBits());
    if (tgt) {
        SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
    }

    return tgt;
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
        const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
    int maxSize = this->caps()->maxTextureSize();
    if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
        return nullptr;
    }

    GrBackendFormat backendFormat = GrBackendFormats::MakeVk(vkInfo.fFormat);
    if (!backendFormat.isValid()) {
        return nullptr;
    }
    int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
    if (!sampleCnt) {
        return nullptr;
    }

    return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
}

bool GrVkGpu::loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
                                  const GrVkRenderPass& renderPass,
                                  GrAttachment* dst,
                                  GrVkImage* src,
                                  const SkIRect& srcRect) {
    return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
}

bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
    // don't do anything for linearly tiled textures (can't have mipmaps)
    if (vkTex->isLinearTiled()) {
        SkDebugf("Trying to create mipmap for linear tiled texture");
        return false;
    }

    // determine if we can blit to and from this format
    const GrVkCaps& caps = this->vkCaps();
    if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
        !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
        !caps.mipmapSupport()) {
        return false;
    }

    int width = tex->width();
    int height = tex->height();
    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));

    // SkMipmap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
    SkASSERT(levelCount == vkTex->mipLevels());

    // change layout of the layers so we can write to them.
    vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                          VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // setup memory barrier
    SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
            nullptr,                                 // pNext
            VK_ACCESS_TRANSFER_WRITE_BIT,            // srcAccessMask
            VK_ACCESS_TRANSFER_READ_BIT,             // dstAccessMask
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,    // oldLayout
            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,    // newLayout
            VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
            vkTex->image(),                          // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
    };

    // Blit the miplevels
    uint32_t mipLevel = 1;
    while (mipLevel < levelCount) {
        int prevWidth = width;
        int prevHeight = height;
        width = std::max(1, width / 2);
        height = std::max(1, height / 2);

        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);

        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };
        this->currentCommandBuffer()->blitImage(this,
                                                vkTex->resource(),
                                                vkTex->image(),
                                                VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                vkTex->resource(),
                                                vkTex->image(),
                                                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                1,
                                                &blitRegion,
                                                VK_FILTER_LINEAR);
        ++mipLevel;
    }
    if (levelCount > 1) {
        // This barrier logically is not needed, but it changes the final level to the same layout
        // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
        // layouts and future layout changes easier. The alternative here would be to track layout
        // and memory accesses per layer which doesn't seem worth it.
        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
        vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                   SkISize dimensions, int numStencilSamples) {
    VkFormat sFmt = this->vkCaps().preferredStencilFormat();

    fStats.incStencilAttachmentCreates();
    return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
}

sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                int numSamples,
                                                GrProtected isProtected,
                                                GrMemoryless memoryless) {
    VkFormat pixelFormat;
    SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
    SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat));
    SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));

    fStats.incMSAAAttachmentCreates();
    return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
}

////////////////////////////////////////////////////////////////////////////////

bool copy_src_data(char* mapPtr,
                   VkFormat vkFormat,
                   const TArray<size_t>& individualMipOffsets,
                   const GrPixmap srcData[],
                   int numMipLevels) {
    SkASSERT(srcData && numMipLevels);
    SkASSERT(!skgpu::VkFormatIsCompressed(vkFormat));
    SkASSERT(individualMipOffsets.size() == numMipLevels);
    SkASSERT(mapPtr);

    size_t bytesPerPixel = skgpu::VkFormatBytesPerBlock(vkFormat);

    for (int level = 0; level < numMipLevels; ++level) {
        const size_t trimRB = srcData[level].info().width() * bytesPerPixel;

        SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
                     srcData[level].addr(), srcData[level].rowBytes(),
                     trimRB, srcData[level].height());
    }
    return true;
}

bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
                                             SkISize dimensions,
                                             int sampleCnt,
                                             GrTexturable texturable,
                                             GrRenderable renderable,
                                             skgpu::Mipmapped mipmapped,
                                             GrVkImageInfo* info,
                                             GrProtected isProtected) {
    SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);

    if (fProtectedContext != isProtected) {
        return false;
    }

    if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) {
        return false;
    }

    // MSAA images are only currently used by createTestingOnlyBackendRenderTarget.
    if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) {
        return false;
    }

    if (renderable == GrRenderable::kYes) {
        sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
        if (!sampleCnt) {
            return false;
        }
    }

    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (texturable == GrTexturable::kYes) {
        usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
    }
    if (renderable == GrRenderable::kYes) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments
        usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = vkFormat;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = numMipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    imageDesc.fIsProtected = fProtectedContext;

    if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
        SkDebugf("Failed to init image info\n");
        return false;
    }

    return true;
}

bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
                                    sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                    std::array<float, 4> color) {
    GrVkImageInfo info;
    SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));

    sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
    SkASSERT(mutableState);
    sk_sp<GrVkTexture> texture =
            GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
                                            kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
                                            kRW_GrIOType, info, std::move(mutableState));
    if (!texture) {
        return false;
    }
    GrVkImage* texImage = texture->textureImage();

    GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
    if (!cmdBuffer) {
        return false;
    }

    texImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // CmdClearColorImage doesn't work for compressed formats
    SkASSERT(!skgpu::VkFormatIsCompressed(info.fFormat));

    VkClearColorValue vkColor;
    // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
    // uint32 union members in those cases.
    vkColor.float32[0] = color[0];
    vkColor.float32[1] = color[1];
    vkColor.float32[2] = color[2];
    vkColor.float32[3] = color[3];
    VkImageSubresourceRange range;
    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    range.baseArrayLayer = 0;
    range.baseMipLevel = 0;
    range.layerCount = 1;
    range.levelCount = info.fLevelCount;
    cmdBuffer->clearColorImage(this, texImage, &vkColor, 1, &range);

    // Change image layout to shader read since if we use this texture as a borrowed
    // texture within Ganesh we require that its layout be set to that
    texImage->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                             VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                             false);

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}

GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
                                                 const GrBackendFormat& format,
                                                 GrRenderable renderable,
                                                 skgpu::Mipmapped mipmapped,
                                                 GrProtected isProtected,
                                                 std::string_view label) {
    const GrVkCaps& caps = this->vkCaps();

    if (fProtectedContext != isProtected) {
        return {};
    }

    VkFormat vkFormat;
    if (!GrBackendFormats::AsVkFormat(format, &vkFormat)) {
        return {};
    }

    // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
    if (!caps.isVkFormatTexturable(vkFormat)) {
        return {};
    }

    if (skgpu::VkFormatNeedsYcbcrSampler(vkFormat)) {
        return {};
    }

    GrVkImageInfo info;
    if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
                                              renderable, mipmapped, &info, isProtected)) {
        return {};
    }

    return GrBackendTextures::MakeVk(dimensions.width(), dimensions.height(), info);
}

GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(SkISize dimensions,
                                                           const GrBackendFormat& format,
                                                           skgpu::Mipmapped mipmapped,
                                                           GrProtected isProtected) {
    return this->onCreateBackendTexture(dimensions,
                                        format,
                                        GrRenderable::kNo,
                                        mipmapped,
                                        isProtected,
                                        /*label=*/"VkGpu_CreateCompressedBackendTexture");
}

bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                               sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                               const void* data,
                                               size_t size) {
    GrVkImageInfo info;
    SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));

    sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
    SkASSERT(mutableState);
    sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
                                                                 backendTexture.dimensions(),
                                                                 kBorrow_GrWrapOwnership,
                                                                 GrWrapCacheable::kNo,
                                                                 kRW_GrIOType,
                                                                 info,
                                                                 std::move(mutableState));
    if (!texture) {
        return false;
    }

    GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
    if (!cmdBuffer) {
        return false;
    }
    GrVkImage* image = texture->textureImage();
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                          VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    SkTextureCompressionType compression =
            GrBackendFormatToCompressionType(backendTexture.getBackendFormat());

    TArray<VkBufferImageCopy> regions;
    TArray<size_t> individualMipOffsets;
    GrStagingBufferManager::Slice slice;

    fill_in_compressed_regions(&fStagingBufferManager,
                               &regions,
                               &individualMipOffsets,
                               &slice,
                               compression,
                               info.fFormat,
                               backendTexture.dimensions(),
                               backendTexture.fMipmapped);

    if (!slice.fBuffer) {
        return false;
    }

    memcpy(slice.fOffsetMapPtr, data, size);

    cmdBuffer->addGrSurface(texture);
    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason being is that
    // the buffer is coming from the staging manager and the staging manager will make sure the
    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
    // every upload in the frame.
    cmdBuffer->copyBufferToImage(this,
                                 static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
                                 image,
                                 image->currentLayout(),
                                 regions.size(),
                                 regions.begin());

    // Change image layout to shader read since if we use this texture as a borrowed
    // texture within Ganesh we require that its layout be set to that
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                          VK_ACCESS_SHADER_READ_BIT,
                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                          false);

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}

1916 VkImageLayout newLayout,
1917 uint32_t newQueueFamilyIndex) {
1918 // Even though internally we use this helper for getting src access flags and stages they
1919 // can also be used for general dst flags since we don't know exactly what the client
1920 // plans on using the image for.
1921 if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
1922 newLayout = image->currentLayout();
1923 }
1924 VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
1925 VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);
1926
1927 uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
1928 auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
1929 return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
1930 queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
1931 };
1932 if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
1933 // It is illegal to have both the new and old queue be special queue families (i.e. external
1934 // or foreign).
1935 return;
1936 }
1937
1938 image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
1939 newQueueFamilyIndex);
1940}
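// Per the Vulkan spec, a queue-family ownership transfer may name a special family
// (VK_QUEUE_FAMILY_EXTERNAL or VK_QUEUE_FAMILY_FOREIGN_EXT) on at most one side of the
// barrier, hence the early-out above when both indices are special. Passing
// VK_IMAGE_LAYOUT_UNDEFINED means "keep the image's current layout".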
1941
1942bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
1943 sk_sp<skgpu::MutableTextureState> currentState,
1944 SkISize dimensions,
1945 VkImageLayout newLayout,
1946 uint32_t newQueueFamilyIndex,
1947 skgpu::MutableTextureState* previousState,
1948 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1949 sk_sp<GrVkImage> texture = GrVkImage::MakeWrapped(this,
1950 dimensions,
1951 info,
1952 std::move(currentState),
1953 GrVkImage::UsageFlags::kColorAttachment,
1954 kBorrow_GrWrapOwnership,
1955 GrWrapCacheable::kNo,
1956 "VkGpu_SetBackendSurfaceState",
1957 /*forSecondaryCB=*/false);
1958 SkASSERT(texture);
1959 if (!texture) {
1960 return false;
1961 }
1962 if (previousState) {
1963 previousState->set(*texture->getMutableState());
1964 }
1965 set_layout_and_queue_from_mutable_state(this, texture.get(), newLayout, newQueueFamilyIndex);
1966 if (finishedCallback) {
1967 this->addFinishedCallback(std::move(finishedCallback));
1968 }
1969 return true;
1970}
1971
1972bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
1973 const skgpu::MutableTextureState& newState,
1974 skgpu::MutableTextureState* previousState,
1975 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1976 GrVkImageInfo info;
1977 SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));
1978 sk_sp<skgpu::MutableTextureState> currentState = backendTexture.getMutableState();
1979 SkASSERT(currentState);
1980 SkASSERT(newState.isValid() && newState.backend() == skgpu::BackendApi::kVulkan);
1981 return this->setBackendSurfaceState(info, std::move(currentState), backendTexture.dimensions(),
1982 skgpu::MutableTextureStates::GetVkImageLayout(newState),
1983 skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState),
1984 previousState,
1985 std::move(finishedCallback));
1986}
1987
1988bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1989 const skgpu::MutableTextureState& newState,
1990 skgpu::MutableTextureState* previousState,
1991 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1992 GrVkImageInfo info;
1993 SkAssertResult(GrBackendRenderTargets::GetVkImageInfo(backendRenderTarget, &info));
1994 sk_sp<skgpu::MutableTextureState> currentState = backendRenderTarget.getMutableState();
1995 SkASSERT(currentState);
1996 SkASSERT(newState.isValid() && newState.backend() == skgpu::BackendApi::kVulkan);
1997 return this->setBackendSurfaceState(info, std::move(currentState),
1998 backendRenderTarget.dimensions(),
1999 skgpu::MutableTextureStates::GetVkImageLayout(newState),
2000 skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState),
2001 previousState, std::move(finishedCallback));
2002}
2003
2004void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
2005 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2006 VkPipelineStageFlags dstStage;
2007 VkAccessFlags dstAccess;
2008 if (barrierType == kBlend_GrXferBarrierType) {
2009 dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
2010 dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
2011 } else {
2012 SkASSERT(barrierType == kTexture_GrXferBarrierType);
2013 dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
2014 dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
2015 }
2016 GrVkImage* image = vkRT->colorAttachment();
2017 VkImageMemoryBarrier barrier;
2018 barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
2019 barrier.pNext = nullptr;
2020 barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
2021 barrier.dstAccessMask = dstAccess;
2022 barrier.oldLayout = image->currentLayout();
2023 barrier.newLayout = barrier.oldLayout;
2024 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
2025 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
2026 barrier.image = image->image();
2027 barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
2028 this->addImageMemoryBarrier(image->resource(),
2029 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
2030 dstStage, true, &barrier);
2031}
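// This is a render-pass self-dependency barrier: oldLayout equals newLayout, so no
// transition happens; the barrier only makes prior color-attachment writes visible to
// non-coherent advanced blends (kBlend) or to input-attachment reads (kTexture).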
2032
2033void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
2034 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2035
2036 GrVkImageInfo info;
2037 if (GrBackendTextures::GetVkImageInfo(tex, &info)) {
2038 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2039 }
2040}
2041
2042bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
2043 GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
2044 GrVkRenderPass::AttachmentFlags attachmentFlags;
2045 GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
2046 &attachmentsDescriptor, &attachmentFlags);
2047
2048 GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
2049 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
2050 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
2051 }
2052 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
2053 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
2054 }
2055
2056 GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
2057 if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
2058 programInfo.colorLoadOp() == GrLoadOp::kLoad) {
2059 loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
2060 }
2061 sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
2062 &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
2063 if (!renderPass) {
2064 return false;
2065 }
2066
2067 GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
2068
2069 auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
2070 desc,
2071 programInfo,
2072 renderPass->vkRenderPass(),
2073 &stat);
2074 if (!pipelineState) {
2075 return false;
2076 }
2077
2078 return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
2079}
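// compile() only warms the caches: it recreates a compatible render pass and pipeline
// state ahead of first use, and returns true only when the pipeline was not already
// cached (stat != kHit), letting callers count how many precompiles did real work.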
2080
2081#if defined(GR_TEST_UTILS)
2082bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
2083 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2084
2085 GrVkImageInfo backend;
2086 if (!GrBackendTextures::GetVkImageInfo(tex, &backend)) {
2087 return false;
2088 }
2089
2090 if (backend.fImage && backend.fAlloc.fMemory) {
2091 VkMemoryRequirements req;
2092 memset(&req, 0, sizeof(req));
2093 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
2094 backend.fImage,
2095 &req));
2096 // TODO: find a better check
2097 // This will probably fail with a different driver
2098 return (req.size > 0) && (req.size <= 8192 * 8192);
2099 }
2100
2101 return false;
2102}
2103
2104GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
2105 GrColorType ct,
2106 int sampleCnt,
2107 GrProtected isProtected) {
2108 if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
2109 dimensions.height() > this->caps()->maxRenderTargetSize()) {
2110 return {};
2111 }
2112
2113 VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);
2114
2115 GrVkImageInfo info;
2116 if (!this->createVkImageForBackendSurface(vkFormat,
2117 dimensions,
2118 sampleCnt,
2119 GrTexturable::kNo,
2120 GrRenderable::kYes,
2121 skgpu::Mipmapped::kNo,
2122 &info,
2123 isProtected)) {
2124 return {};
2125 }
2126 return GrBackendRenderTargets::MakeVk(dimensions.width(), dimensions.height(), info);
2127}
2128
2129void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
2130 SkASSERT(GrBackendApi::kVulkan == rt.fBackend);
2131
2132 GrVkImageInfo info;
2133 if (GrBackendRenderTargets::GetVkImageInfo(rt, &info)) {
2134 // something in the command buffer may still be using this, so force submit
2135 SkAssertResult(this->submitCommandBuffer(kForce_SyncQueue));
2136 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2137 }
2138}
2139#endif
2140
2141////////////////////////////////////////////////////////////////////////////////
2142
2143void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
2144 VkPipelineStageFlags srcStageMask,
2145 VkPipelineStageFlags dstStageMask,
2146 bool byRegion,
2147 VkBufferMemoryBarrier* barrier) const {
2148 if (!this->currentCommandBuffer()) {
2149 return;
2150 }
2151 SkASSERT(resource);
2152 this->currentCommandBuffer()->pipelineBarrier(this,
2153 resource,
2154 srcStageMask,
2155 dstStageMask,
2156 byRegion,
2157 GrVkCommandBuffer::kBufferMemory_BarrierType,
2158 barrier);
2159}
2160void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
2161 VkPipelineStageFlags dstStageMask,
2162 bool byRegion,
2163 VkBufferMemoryBarrier* barrier) const {
2164 if (!this->currentCommandBuffer()) {
2165 return;
2166 }
2167 // We don't pass in a resource here to the command buffer. The command buffer only is using it
2168 // to hold a ref, but every place where we add a buffer memory barrier we are doing some other
2169 // command with the buffer on the command buffer. Thus those other commands will already cause
2170 // the command buffer to be holding a ref to the buffer.
2171 this->currentCommandBuffer()->pipelineBarrier(this,
2172 /*resource=*/nullptr,
2173 srcStageMask,
2174 dstStageMask,
2175 byRegion,
2176 GrVkCommandBuffer::kBufferMemory_BarrierType,
2177 barrier);
2178}
2179
2180void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
2181 VkPipelineStageFlags srcStageMask,
2182 VkPipelineStageFlags dstStageMask,
2183 bool byRegion,
2184 VkImageMemoryBarrier* barrier) const {
2185 // If we are in the middle of destroying or abandoning the context we may hit a release proc
2186 // that triggers the destruction of a GrVkImage. This could cause us to try and transfer the
2187 // VkImage back to the original queue. In this state we don't submit anymore work and we may not
2188 // have a current command buffer. Thus we won't do the queue transfer.
2189 if (!this->currentCommandBuffer()) {
2190 return;
2191 }
2192 SkASSERT(resource);
2193 this->currentCommandBuffer()->pipelineBarrier(this,
2194 resource,
2195 srcStageMask,
2196 dstStageMask,
2197 byRegion,
2198 GrVkCommandBuffer::kImageMemory_BarrierType,
2199 barrier);
2200}
2201
2202void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
2203 SkSpan<GrSurfaceProxy*> proxies,
2204 SkSurfaces::BackendSurfaceAccess access,
2205 const skgpu::MutableTextureState* newState) {
2206 // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
2207 // not affect what we do here.
2208 if (!proxies.empty() && (access == SkSurfaces::BackendSurfaceAccess::kPresent || newState)) {
2209 // We currently don't support passing in new surface state for multiple proxies here. The
2210 // only time we have multiple proxies is if we are flushing a yuv SkImage which won't have
2211 // state updates anyways. Additionally, if we have a newState then we must not have any
2212 // BackendSurfaceAccess.
2213 SkASSERT(!newState || proxies.size() == 1);
2214 SkASSERT(!newState || access == SkSurfaces::BackendSurfaceAccess::kNoAccess);
2215 GrVkImage* image;
2216 for (GrSurfaceProxy* proxy : proxies) {
2217 SkASSERT(proxy->isInstantiated());
2218 if (GrTexture* tex = proxy->peekTexture()) {
2219 image = static_cast<GrVkTexture*>(tex)->textureImage();
2220 } else {
2221 GrRenderTarget* rt = proxy->peekRenderTarget();
2222 SkASSERT(rt);
2223 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2224 image = vkRT->externalAttachment();
2225 }
2226 if (newState) {
2227 VkImageLayout newLayout =
2228 skgpu::MutableTextureStates::GetVkImageLayout(newState);
2229 uint32_t newIndex =
2230 skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState);
2231 set_layout_and_queue_from_mutable_state(this, image, newLayout, newIndex);
2232 } else {
2233 SkASSERT(access == SkSurfaces::BackendSurfaceAccess::kPresent);
2234 image->prepareForPresent(this);
2235 }
2236 }
2237 }
2238}
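// The two exits from the loop above are mutually exclusive: a caller-supplied
// MutableTextureState moves the image to an arbitrary layout/queue family for use
// outside Ganesh, while kPresent access calls prepareForPresent(), which readies the
// image for presentation (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR when the swapchain
// extension is available).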
2239
2240void GrVkGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
2241 GrGpuFinishedContext finishedContext) {
2242 SkASSERT(finishedProc);
2243 this->addFinishedCallback(skgpu::RefCntedCallback::Make(finishedProc, finishedContext));
2244}
2245
2246void GrVkGpu::addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback) {
2247 SkASSERT(finishedCallback);
2248 fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
2249}
2250
2251void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
2252 this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
2253}
2254
2255bool GrVkGpu::onSubmitToGpu(GrSyncCpu sync) {
2256 if (sync == GrSyncCpu::kYes) {
2257 return this->submitCommandBuffer(kForce_SyncQueue);
2258 } else {
2259 return this->submitCommandBuffer(kSkip_SyncQueue);
2260 }
2261}
2262
2263void GrVkGpu::finishOutstandingGpuWork() {
2264 VK_CALL(QueueWaitIdle(fQueue));
2265
2266 if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
2267 fResourceProvider.forceSyncAllCommandBuffers();
2268 }
2269}
2270
2271void GrVkGpu::onReportSubmitHistograms() {
2272#if SK_HISTOGRAMS_ENABLED
2273 uint64_t allocatedMemory = 0, usedMemory = 0;
2274 std::tie(allocatedMemory, usedMemory) = fMemoryAllocator->totalAllocatedAndUsedMemory();
2275 SkASSERT(usedMemory <= allocatedMemory);
2276 if (allocatedMemory > 0) {
2277 SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
2278 (usedMemory * 100) / allocatedMemory);
2279 }
2280 // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
2281 // supports samples up to around 500MB which should support the amounts of memory we allocate.
2282 SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
2283#endif // SK_HISTOGRAMS_ENABLED
2284}
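// The shift converts bytes to kilobytes for the histogram: for example,
// 5,242,880 bytes >> 10 == 5,120 KB. The percentage histogram records
// used/allocated memory on a 0-100 scale.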
2285
2286void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
2287 GrSurface* src,
2288 GrVkImage* dstImage,
2289 GrVkImage* srcImage,
2290 const SkIRect& srcRect,
2291 const SkIPoint& dstPoint) {
2292 if (!this->currentCommandBuffer()) {
2293 return;
2294 }
2295
2296#ifdef SK_DEBUG
2297 int dstSampleCnt = dstImage->numSamples();
2298 int srcSampleCnt = srcImage->numSamples();
2299 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2300 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2301 VkFormat dstFormat = dstImage->imageFormat();
2302 VkFormat srcFormat;
2303 SkAssertResult(GrBackendFormats::AsVkFormat(src->backendFormat(), &srcFormat));
2304 SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2305 srcFormat, srcSampleCnt, srcHasYcbcr));
2306#endif
2307 if (src->isProtected() && !dst->isProtected()) {
2308 SkDebugf("Can't copy from protected memory to non-protected");
2309 return;
2310 }
2311
2312 // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
2313 // the cache is flushed since it is only being written to.
2314 dstImage->setImageLayout(this,
2315 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2316 VK_ACCESS_TRANSFER_WRITE_BIT,
2317 VK_PIPELINE_STAGE_TRANSFER_BIT,
2318 false);
2319
2320 srcImage->setImageLayout(this,
2321 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2322 VK_ACCESS_TRANSFER_READ_BIT,
2323 VK_PIPELINE_STAGE_TRANSFER_BIT,
2324 false);
2325
2326 VkImageCopy copyRegion;
2327 memset(&copyRegion, 0, sizeof(VkImageCopy));
2328 copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2329 copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
2330 copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2331 copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
2332 copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };
2333
2334 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2335 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2336 this->currentCommandBuffer()->copyImage(this,
2337 srcImage,
2338 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2339 dstImage,
2340 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2341 1,
2342 &copyRegion);
2343
2344 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2345 srcRect.width(), srcRect.height());
2346 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2347 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2348}
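// vkCmdCopyImage performs no scaling or format conversion; it requires the two images
// to have size-compatible formats and matching sample counts, which is exactly what the
// GrVkCaps::canCopyImage() debug check above mirrors.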
2349
2350void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
2351 GrSurface* src,
2352 GrVkImage* dstImage,
2353 GrVkImage* srcImage,
2354 const SkIRect& srcRect,
2355 const SkIRect& dstRect,
2356 GrSamplerState::Filter filter) {
2357 if (!this->currentCommandBuffer()) {
2358 return;
2359 }
2360
2361#ifdef SK_DEBUG
2362 int dstSampleCnt = dstImage->numSamples();
2363 int srcSampleCnt = srcImage->numSamples();
2364 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2365 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2366 VkFormat dstFormat = dstImage->imageFormat();
2367 VkFormat srcFormat;
2368 SkAssertResult(GrBackendFormats::AsVkFormat(src->backendFormat(), &srcFormat));
2369 SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat,
2370 dstSampleCnt,
2371 dstImage->isLinearTiled(),
2372 dstHasYcbcr,
2373 srcFormat,
2374 srcSampleCnt,
2375 srcImage->isLinearTiled(),
2376 srcHasYcbcr));
2377
2378#endif
2379 if (src->isProtected() && !dst->isProtected()) {
2380 SkDebugf("Can't copy from protected memory to non-protected");
2381 return;
2382 }
2383
2384 dstImage->setImageLayout(this,
2385 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2386 VK_ACCESS_TRANSFER_WRITE_BIT,
2387 VK_PIPELINE_STAGE_TRANSFER_BIT,
2388 false);
2389
2390 srcImage->setImageLayout(this,
2391 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2392 VK_ACCESS_TRANSFER_READ_BIT,
2393 VK_PIPELINE_STAGE_TRANSFER_BIT,
2394 false);
2395
2396 VkImageBlit blitRegion;
2397 memset(&blitRegion, 0, sizeof(VkImageBlit));
2398 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2399 blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
2400 blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
2401 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2402 blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
2403 blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };
2404
2405 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2406 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2407 this->currentCommandBuffer()->blitImage(this,
2408 *srcImage,
2409 *dstImage,
2410 1,
2411 &blitRegion,
2412 filter == GrSamplerState::Filter::kNearest ?
2413 VK_FILTER_NEAREST : VK_FILTER_LINEAR);
2414
2415 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2416 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2417}
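// Unlike vkCmdCopyImage, vkCmdBlitImage can scale and convert formats, which is why
// this path takes separate src/dst rects plus a filter. Vulkan only permits blits
// between single-sample images, a restriction folded into canCopyAsBlit().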
2418
2419void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
2420 const SkIPoint& dstPoint) {
2421 if (src->isProtected() && !dst->isProtected()) {
2422 SkDebugf("Can't copy from protected memory to non-protected");
2423 return;
2424 }
2425 GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
2426 this->resolveImage(dst, srcRT, srcRect, dstPoint);
2427 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2428 srcRect.width(), srcRect.height());
2429 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2430 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2431}
2432
2433bool GrVkGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
2434 GrSurface* src, const SkIRect& srcRect,
2435 GrSamplerState::Filter filter) {
2436#ifdef SK_DEBUG
2437 if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
2438 SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
2439 }
2440 if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
2441 SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
2442 }
2443#endif
2444 if (src->isProtected() && !dst->isProtected()) {
2445 SkDebugf("Can't copy from protected memory to non-protected");
2446 return false;
2447 }
2448
2449 GrVkImage* dstImage;
2450 GrVkImage* srcImage;
2451 GrRenderTarget* dstRT = dst->asRenderTarget();
2452 if (dstRT) {
2453 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
2454 if (vkRT->wrapsSecondaryCommandBuffer()) {
2455 return false;
2456 }
2457 // This will technically return true for single sample rts that used DMSAA in which case we
2458 // don't have to pick the resolve attachment. But in that case the resolve and color
2459 // attachments will be the same anyways.
2460 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2461 dstImage = vkRT->resolveAttachment();
2462 } else {
2463 dstImage = vkRT->colorAttachment();
2464 }
2465 } else if (dst->asTexture()) {
2466 dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage();
2467 } else {
2468 // The surface is already a GrAttachment
2469 dstImage = static_cast<GrVkImage*>(dst);
2470 }
2471 GrRenderTarget* srcRT = src->asRenderTarget();
2472 if (srcRT) {
2473 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
2474 // This will technically return true for single sample rts that used DMSAA in which case we
2475 // don't have to pick the resolve attachment. But in that case the resolve and color
2476 // attachments will be the same anyways.
2477 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2478 srcImage = vkRT->resolveAttachment();
2479 } else {
2480 srcImage = vkRT->colorAttachment();
2481 }
2482 } else if (src->asTexture()) {
2483 SkASSERT(src->asTexture());
2484 srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
2485 } else {
2486 // The surface is already a GrAttachment
2487 srcImage = static_cast<GrVkImage*>(src);
2488 }
2489
2490 VkFormat dstFormat = dstImage->imageFormat();
2491 VkFormat srcFormat = srcImage->imageFormat();
2492
2493 int dstSampleCnt = dstImage->numSamples();
2494 int srcSampleCnt = srcImage->numSamples();
2495
2496 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2497 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2498
2499 if (srcRect.size() == dstRect.size()) {
2500 // Prefer resolves or copy-image commands when there is no scaling
2501 const SkIPoint dstPoint = dstRect.topLeft();
2502 if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
2503 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2504 this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
2505 return true;
2506 }
2507
2508 if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2509 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2510 this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
2511 return true;
2512 }
2513 }
2514
2515 if (this->vkCaps().canCopyAsBlit(dstFormat,
2516 dstSampleCnt,
2517 dstImage->isLinearTiled(),
2518 dstHasYcbcr,
2519 srcFormat,
2520 srcSampleCnt,
2521 srcImage->isLinearTiled(),
2522 srcHasYcbcr)) {
2523 this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstRect, filter);
2524 return true;
2525 }
2526
2527 return false;
2528}
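// Strategy order when src and dst sizes match: prefer a resolve (collapses MSAA into
// dst), then a raw vkCmdCopyImage; a filtered vkCmdBlitImage is the fallback, and the
// only option when the copy also scales.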
2529
2530bool GrVkGpu::onReadPixels(GrSurface* surface,
2531 SkIRect rect,
2532 GrColorType surfaceColorType,
2533 GrColorType dstColorType,
2534 void* buffer,
2535 size_t rowBytes) {
2536 if (surface->isProtected()) {
2537 return false;
2538 }
2539
2540 if (!this->currentCommandBuffer()) {
2541 return false;
2542 }
2543
2544 GrVkImage* image = nullptr;
2545 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
2546 if (rt) {
2547 // Reading from render targets that wrap a secondary command buffer is not allowed since
2548 // it would require us to know the VkImage, which we don't have, as well as need us to
2549 // stop and start the VkRenderPass which we don't have access to.
2550 if (rt->wrapsSecondaryCommandBuffer()) {
2551 return false;
2552 }
2553 image = rt->nonMSAAAttachment();
2554 } else {
2555 image = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
2556 }
2557
2558 if (!image) {
2559 return false;
2560 }
2561
2562 if (dstColorType == GrColorType::kUnknown ||
2563 dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
2564 return false;
2565 }
2566
2567 // Change layout of our target so it can be used as copy
2568 image->setImageLayout(this,
2569 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2570 VK_ACCESS_TRANSFER_READ_BIT,
2571 VK_PIPELINE_STAGE_TRANSFER_BIT,
2572 false);
2573
2574 size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
2575 if (skgpu::VkFormatBytesPerBlock(image->imageFormat()) != bpp) {
2576 return false;
2577 }
2578 size_t tightRowBytes = bpp*rect.width();
2579
2580 VkBufferImageCopy region;
2581 memset(&region, 0, sizeof(VkBufferImageCopy));
2582 VkOffset3D offset = { rect.left(), rect.top(), 0 };
2583 region.imageOffset = offset;
2584 region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
2585
2586 size_t transBufferRowBytes = bpp * region.imageExtent.width;
2587 size_t imageRows = region.imageExtent.height;
2588 GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
2589 sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
2590 transBufferRowBytes * imageRows,
2591 GrGpuBufferType::kXferGpuToCpu,
2592 kDynamic_GrAccessPattern,
2593 GrResourceProvider::ZeroInit::kNo);
2594
2595 if (!transferBuffer) {
2596 return false;
2597 }
2598
2599 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
2600
2601 // Copy the image to a buffer so we can map it to cpu memory
2602 region.bufferOffset = 0;
2603 region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
2604 region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
2605 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2606
2607 this->currentCommandBuffer()->copyImageToBuffer(this,
2608 image,
2609 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2610 transferBuffer,
2611 1,
2612 &region);
2613
2614 // make sure the copy to buffer has finished
2615 vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
2616 VK_ACCESS_HOST_READ_BIT,
2617 VK_PIPELINE_STAGE_TRANSFER_BIT,
2618 VK_PIPELINE_STAGE_HOST_BIT,
2619 false);
2620
2621 // We need to submit the current command buffer to the Queue and make sure it finishes before
2622 // we can copy the data out of the buffer.
2623 if (!this->submitCommandBuffer(kForce_SyncQueue)) {
2624 return false;
2625 }
2626 void* mappedMemory = transferBuffer->map();
2627 if (!mappedMemory) {
2628 return false;
2629 }
2630
2631 SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());
2632
2633 transferBuffer->unmap();
2634 return true;
2635}
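// Readback here is fully synchronous: transition to TRANSFER_SRC, copy image->buffer,
// barrier the buffer for host reads, force-submit and wait, then map. SkRectMemcpy
// repacks from the tightly packed transfer buffer (transBufferRowBytes per row) into
// the caller's rowBytes stride.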
2636
2637bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
2638 sk_sp<const GrVkFramebuffer> framebuffer,
2639 const VkClearValue* colorClear,
2640 const GrSurface* target,
2641 const SkIRect& renderPassBounds,
2642 bool forSecondaryCB) {
2643 if (!this->currentCommandBuffer()) {
2644 return false;
2645 }
2646 SkASSERT(!framebuffer->isExternal());
2647
2648#ifdef SK_DEBUG
2649 uint32_t index;
2650 bool result = renderPass->colorAttachmentIndex(&index);
2651 SkASSERT(result && 0 == index);
2652 result = renderPass->stencilAttachmentIndex(&index);
2653 if (result) {
2654 SkASSERT(1 == index);
2655 }
2656#endif
2657 VkClearValue clears[3];
2658 int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
2659 clears[0].color = colorClear->color;
2660 clears[stencilIndex].depthStencil.depth = 0.0f;
2661 clears[stencilIndex].depthStencil.stencil = 0;
2662
2663 return this->currentCommandBuffer()->beginRenderPass(
2664 this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
2665}
2666
2667void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
2668 const SkIRect& bounds) {
2669 // We had a command buffer when we started the render pass, we should have one now as well.
2670 SkASSERT(this->currentCommandBuffer());
2671 this->currentCommandBuffer()->endRenderPass(this);
2672 this->didWriteToSurface(target, origin, &bounds);
2673}
2674
2675bool GrVkGpu::checkVkResult(VkResult result) {
2676 switch (result) {
2677 case VK_SUCCESS:
2678 return true;
2679 case VK_ERROR_DEVICE_LOST:
2680 if (!fDeviceIsLost) {
2681 // Callback should only be invoked once, and device should be marked as lost first.
2682 fDeviceIsLost = true;
2683 skgpu::InvokeDeviceLostCallback(vkInterface(),
2684 device(),
2685 fDeviceLostContext,
2686 fDeviceLostProc,
2687 vkCaps().supportsDeviceFaultInfo());
2688 }
2689 return false;
2690 case VK_ERROR_OUT_OF_DEVICE_MEMORY:
2691 case VK_ERROR_OUT_OF_HOST_MEMORY:
2692 this->setOOMed();
2693 return false;
2694 default:
2695 return false;
2696 }
2697}
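// Every VK_CALL result funnels through checkVkResult(): VK_ERROR_DEVICE_LOST invokes
// the client's device-lost callback exactly once (guarded by fDeviceIsLost), either
// out-of-memory error marks the context OOMed, and all failures return false so
// callers can unwind.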
2698
2699void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
2700 if (!this->currentCommandBuffer()) {
2701 return;
2702 }
2703 this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
2704}
2705
2706void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
2707 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
2708
2709 fCachedOpsRenderPass->submit();
2710 fCachedOpsRenderPass->reset();
2711}
2712
2713[[nodiscard]] std::unique_ptr<GrSemaphore> GrVkGpu::makeSemaphore(bool isOwned) {
2714 return GrVkSemaphore::Make(this, isOwned);
2715}
2716
2717std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
2718 GrSemaphoreWrapType wrapType,
2719 GrWrapOwnership ownership) {
2720 return GrVkSemaphore::MakeWrapped(this, GrBackendSemaphores::GetVkSemaphore(semaphore),
2721 wrapType, ownership);
2722}
2723
2724void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
2725 SkASSERT(semaphore);
2726
2727 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2728
2729 GrVkSemaphore::Resource* resource = vkSem->getResource();
2730 if (resource->shouldSignal()) {
2731 resource->ref();
2732 fSemaphoresToSignal.push_back(resource);
2733 }
2734}
2735
2736void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
2737 SkASSERT(semaphore);
2738
2739 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2740
2741 GrVkSemaphore::Resource* resource = vkSem->getResource();
2742 if (resource->shouldWait()) {
2743 resource->ref();
2744 fSemaphoresToWaitOn.push_back(resource);
2745 }
2746}
2747
2748std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
2749 SkASSERT(texture);
2750 GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
2751 vkTexture->setImageLayout(this,
2752 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
2753 VK_ACCESS_SHADER_READ_BIT,
2754 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2755 false);
2756 // TODO: should we have a way to notify the caller that this has failed? Currently if the submit
2757 // fails (caused by DEVICE_LOST) this will just cause us to fail the next use of the gpu.
2758 // Eventually we will abandon the whole GPU if this fails.
2759 this->submitToGpu(GrSyncCpu::kNo);
2760
2761 // The image layout change serves as a barrier, so no semaphore is needed.
2762 // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
2763 // thread safe so that only the first thread that tries to use the semaphore actually submits
2764 // it. This additionally would also require thread safety in command buffer submissions to
2765 // queues in general.
2766 return nullptr;
2767}
2768
2769void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
2770 fDrawables.emplace_back(std::move(drawable));
2771}
2772
2773void GrVkGpu::storeVkPipelineCacheData() {
2774 if (this->getContext()->priv().getPersistentCache()) {
2775 this->resourceProvider().storePipelineCacheData();
2776 }
2777}