#if defined(SK_USE_VMA)
// ... (VMA-backed allocator include, elided in this listing)
#endif

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)
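// Note (editor's addition, grounded in this file): VK_CALL routes a raw Vulkan
// entry point through the interface owned by this GrVkGpu, so a call such as
// VK_CALL(QueueWaitIdle(fQueue)) expands to
// GR_VK_CALL(this->vkInterface(), QueueWaitIdle(fQueue)). VK_CALL_RET
// additionally captures the VkResult, which Skia's GR_VK_CALL_RESULT helper
// funnels through checkVkResult() so device loss and OOM get recorded.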
std::unique_ptr<GrGpu> GrVkGpu::Make(const skgpu::VulkanBackendContext& backendContext,
                                     const GrContextOptions& options,
                                     GrDirectContext* direct) {
    // ...
    PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
            reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
                    backendContext.fGetProc("vkEnumerateInstanceVersion",
                                            VK_NULL_HANDLE, VK_NULL_HANDLE));
    uint32_t instanceVersion = 0;
    if (!localEnumerateInstanceVersion) {
        // vkEnumerateInstanceVersion was added in Vulkan 1.1; if the loader doesn't
        // export it we are on a 1.0 instance.
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        VkResult err = localEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
            return nullptr;
        }
    }

    PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
            reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
                    backendContext.fGetProc("vkGetPhysicalDeviceProperties",
                                            backendContext.fInstance, VK_NULL_HANDLE));
    if (!localGetPhysicalDeviceProperties) {
        return nullptr;
    }
    VkPhysicalDeviceProperties physDeviceProperties;
    localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
    uint32_t physDevVersion = physDeviceProperties.apiVersion;

    uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
                                                        : instanceVersion;
    instanceVersion = std::min(instanceVersion, apiVersion);
    physDevVersion = std::min(physDevVersion, apiVersion);
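    // Note (editor's addition): clamping both versions to fMaxAPIVersion keeps
    // Ganesh from using features newer than what the client compiled against;
    // e.g. an instance reporting VK_MAKE_VERSION(1, 3, 0) with fMaxAPIVersion
    // set to VK_MAKE_VERSION(1, 1, 0) is treated as Vulkan 1.1 throughout.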
    const skgpu::VulkanExtensions* extensions = backendContext.fVkExtensions;
    // ...
    auto interface = sk_make_sp<skgpu::VulkanInterface>(backendContext.fGetProc,
                                                        backendContext.fInstance,
                                                        backendContext.fDevice,
                                                        instanceVersion,
                                                        physDevVersion,
                                                        extensions);
    if (!interface->validate(instanceVersion, physDevVersion, extensions)) {
        return nullptr;
    }

    sk_sp<GrVkCaps> caps;
    if (backendContext.fDeviceFeatures2) {
        // ... (build caps directly from the supplied VkPhysicalDeviceFeatures2 chain)
    } else {
        VkPhysicalDeviceFeatures2 features2;
        features2.pNext = nullptr;
        // ... (populate features2 from backendContext.fDeviceFeatures, then build caps)
    }
    sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
#if defined(SK_USE_VMA)
    if (!memoryAllocator) {
        // ... (no allocator was supplied; create a VMA-backed one internally)
    }
#endif
    if (!memoryAllocator) {
        SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
        return nullptr;
    }

    std::unique_ptr<GrVkGpu> vkGpu(new GrVkGpu(direct, backendContext, std::move(caps),
                                               interface, instanceVersion, physDevVersion,
                                               std::move(memoryAllocator)));
    if (backendContext.fProtectedContext == GrProtected::kYes &&
        !vkGpu->vkCaps().supportsProtectedContent()) {
        return nullptr;
    }
    return vkGpu;
}
GrVkGpu::GrVkGpu(GrDirectContext* direct,
                 const skgpu::VulkanBackendContext& backendContext,
                 sk_sp<GrVkCaps> caps,
                 sk_sp<const skgpu::VulkanInterface> interface,
                 uint32_t instanceVersion,
                 uint32_t physicalDeviceVersion,
                 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator)
        : INHERITED(direct)
        , fInterface(std::move(interface))
        , fMemoryAllocator(std::move(memoryAllocator))
        , fVkCaps(std::move(caps))
        , fPhysicalDevice(backendContext.fPhysicalDevice)
        , fDevice(backendContext.fDevice)
        , fQueue(backendContext.fQueue)
        , fQueueIndex(backendContext.fGraphicsQueueIndex)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fDisconnected(false)
        , fProtectedContext(backendContext.fProtectedContext)
        , fDeviceLostContext(backendContext.fDeviceLostContext)
        , fDeviceLostProc(backendContext.fDeviceLostProc) {
    // ...
    fResourceProvider.init();
    // ... (acquire the initial command pool and begin the primary command buffer)
}
void GrVkGpu::destroyResources() {
    if (fMainCmdPool) {
        fMainCmdPool->close();
    }

    // wait for all commands to finish
    this->finishOutstandingGpuWork();

    if (fMainCmdPool) {
        fMainCmdPool->unref();
        fMainCmdPool = nullptr;
    }

    for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
        fSemaphoresToWaitOn[i]->unref();
    }
    fSemaphoresToWaitOn.clear();

    for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.clear();

    fStagingBufferManager.reset();
    // ... (tear down the MSAA load manager and the resource provider)
}
GrVkGpu::~GrVkGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
    // The memory allocator must outlive all other resources, since releasing
    // backend memory may still go through it.
    fMemoryAllocator.reset();
}

void GrVkGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (!fDisconnected) {
        this->destroyResources();

        fSemaphoresToWaitOn.clear();
        fSemaphoresToSignal.clear();
        fMainCmdBuffer = nullptr;
        fDisconnected = true;
    }
}
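// Note (editor's addition): destroyResources() is shared by the destructor and
// disconnect(); the fDisconnected flag guards against tearing the same Vulkan
// objects down twice when a context is abandoned and later destroyed.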
GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(/* render target, bounds, load/store info, ... */) {
    if (!fCachedOpsRenderPass) {
        fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
    }

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    SkASSERT(!useMSAASurface ||
             rt->numSamples() > 1 ||
             (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
              vkRT->resolveAttachment() &&
              vkRT->resolveAttachment()->supportsInputAttachmentUsage()));
    // ...
    bool withResolve = false;
    // ...
    if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
        withResolve = true;
    }
    // ... (pick a framebuffer and load/store ops compatible with the choice above)

    if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
                                   stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
                                   sampledProxies)) {
        return nullptr;
    }
    return fCachedOpsRenderPass.get();
}
bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (!this->currentCommandBuffer()) {
        SkASSERT(fDisconnected);
        return false;
    }
    SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());

    if (!this->currentCommandBuffer()->hasWork() && kForce_SyncQueue != sync &&
        fSemaphoresToSignal.empty() && fSemaphoresToWaitOn.empty()) {
        // There is no actual work to submit, so finished procs added during the flush
        // are invoked directly.
        // ...
        return true;
    }

    fMainCmdBuffer->end(this);
    SkASSERT(fMainCmdPool);
    fMainCmdPool->close();
    bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
                                                   fSemaphoresToWaitOn);

    if (didSubmit && sync == kForce_SyncQueue) {
        fMainCmdBuffer->forceSync(this);
    }

    // If we didn't submit the command buffer then we did not wait on any semaphores. We will
    // continue to hold onto these semaphores and wait on them during the next command buffer
    // submission.
    if (didSubmit) {
        for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
            fSemaphoresToWaitOn[i]->unref();
        }
        fSemaphoresToWaitOn.clear();
    }

    // Even if we did not submit the command buffer, we drop all the signal semaphores since we
    // will never submit them.
    for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.clear();

    // Release the old command pool and acquire a fresh one.
    fMainCmdPool->unref();
    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(fMainCmdBuffer);
        fMainCmdBuffer->begin(this);
    } else {
        fMainCmdBuffer = nullptr;
    }
    // ...
    return didSubmit;
}
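// Note (editor's addition): a kForce_SyncQueue submit both submits and then
// blocks on the command buffer's fence (forceSync), so callers such as
// onReadPixels() below can safely map transfer buffers immediately afterwards.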
bool GrVkGpu::onWritePixels(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType srcColorType,
                            const GrMipLevel texels[],
                            int mipLevelCount,
                            bool prepForTexSampling) {
    GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
    if (!texture) {
        return false;
    }
    GrVkImage* texImage = texture->textureImage();

    // Make sure we have at least the base level
    if (!mipLevelCount || !texels[0].fPixels) {
        return false;
    }
    // ...
    bool success = false;
    if (texImage->isLinearTiled()) {
        if (mipLevelCount > 1) {
            SkDebugf("Can't upload mipmap data to linear tiled texture");
            return false;
        }
        // ... (transition to GENERAL for the host write; the command buffer that
        //      records that layout change must finish before the CPU touches memory)
        if (!this->submitCommandBuffer(kForce_SyncQueue)) {
            return false;
        }
        success = this->uploadTexDataLinear(texImage,
                                            rect,
                                            srcColorType,
                                            texels[0].fPixels,
                                            texels[0].fRowBytes);
    } else {
        SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
        success = this->uploadTexDataOptimal(texImage, rect, srcColorType, texels, mipLevelCount);
        if (1 == mipLevelCount) {
            texture->markMipmapsDirty();
        }
    }

    if (prepForTexSampling) {
        texImage->setImageLayout(this,
                                 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                 VK_ACCESS_SHADER_READ_BIT,
                                 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                 false);
    }
    return success;
}
static void add_transfer_dst_buffer_mem_barrier(GrVkGpu* gpu,
                                                GrVkBuffer* dst,
                                                size_t offset,
                                                size_t size,
                                                bool after) {
    // ...
    VkAccessFlags srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_HOST_READ_BIT;
    VkPipelineStageFlags srcPipelineStageFlags = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkPipelineStageFlags dstPipelineStageFlags = VK_PIPELINE_STAGE_HOST_BIT;

    if (after) {
        swap(srcAccessMask, dstAccessMask);
        swap(srcPipelineStageFlags, dstPipelineStageFlags);
    }

    VkBufferMemoryBarrier bufferMemoryBarrier = {
            VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
            nullptr,                                  // pNext
            srcAccessMask,                            // srcAccessMask
            dstAccessMask,                            // dstAccessMask
            VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
            dst->vkBuffer(),                          // buffer
            offset,                                   // offset
            size,                                     // size
    };

    gpu->addBufferMemoryBarrier(dst->resource(), srcPipelineStageFlags, dstPipelineStageFlags,
                                /*byRegion=*/false, &bufferMemoryBarrier);
}
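// Note (editor's addition): the helper is emitted on both sides of a transfer
// into a GPU-to-CPU readback buffer; the swap() pair under `after` reverses the
// source/destination access masks and pipeline stages, so one barrier
// description serves both the before and after placement.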
bool GrVkGpu::onTransferPixelsTo(GrTexture* texture,
                                 SkIRect rect,
                                 GrColorType surfaceColorType,
                                 GrColorType bufferColorType,
                                 sk_sp<GrGpuBuffer> transferBuffer,
                                 size_t bufferOffset,
                                 size_t rowBytes) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
    if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
        return false;
    }

    GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
    GrVkImage* vkImage = tex->textureImage();
    VkFormat format = vkImage->imageFormat();

    if (!transferBuffer) {
        return false;
    }

    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    // ...

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    region.bufferRowLength = (uint32_t)(rowBytes/bpp);
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { rect.left(), rect.top(), 0 };
    region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };

    // ... (transition vkImage to TRANSFER_DST_OPTIMAL and record copyBufferToImage)
    return true;
}
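// Note (editor's addition): VkBufferImageCopy::bufferRowLength is measured in
// texels, not bytes (0 would mean tightly packed at imageExtent.width), which
// is why the row stride above is expressed as rowBytes/bpp.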
bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface,
                                   SkIRect rect,
                                   GrColorType surfaceColorType,
                                   GrColorType bufferColorType,
                                   sk_sp<GrGpuBuffer> transferBuffer,
                                   size_t offset) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    GrVkImage* srcImage;
    if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
        // Reading from a render target that wraps a secondary command buffer is not
        // allowed; there is no VkImage we can copy from.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        if (!rt->nonMSAAAttachment()) {
            return false;
        }
        srcImage = rt->nonMSAAAttachment();
    } else {
        srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
    }
    // ... (validate the transfer color type and move srcImage to TRANSFER_SRC_OPTIMAL)

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = offset;
    region.bufferRowLength = rect.width();
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { rect.left(), rect.top(), 0 };
    region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};

    this->currentCommandBuffer()->copyImageToBuffer(this,
                                                    srcImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer, 1, &region);
    // ... (add the after-transfer buffer barrier so host reads see the data)
    return true;
}
void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                           const SkIPoint& dstPoint) {
    SkASSERT(dst);
    SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);

    VkImageResolve resolveInfo;
    // ... (fill out a VkImageResolve covering srcRect/dstPoint)

    GrVkImage* dstImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    GrTexture* dstTex = dst->asTexture();
    if (dstTex) {
        dstImage = static_cast<GrVkTexture*>(dstTex)->textureImage();
    } else {
        SkASSERT(dstRT);
        dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
    }
    // ... (move dstImage to TRANSFER_DST_OPTIMAL)

    src->colorAttachment()->setImageLayout(this,
                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                           VK_ACCESS_TRANSFER_READ_BIT,
                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                           false);
    // ... (record the resolve on the current command buffer)
}

void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target->numSamples() > 1);
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
    SkASSERT(rt->colorAttachmentView() && rt->resolveAttachmentView());

    if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
        // The resolve already happened as part of the render pass.
        return;
    }

    this->resolveImage(target, rt, resolveRect,
                       SkIPoint::Make(resolveRect.x(), resolveRect.y()));
}
bool GrVkGpu::uploadTexDataLinear(GrVkImage* texImage,
                                  SkIRect rect,
                                  GrColorType dataColorType,
                                  const void* data,
                                  size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(texImage->isLinearTiled());
    // ...
    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
    size_t trimRowBytes = rect.width() * bpp;

    const VkImageSubresource subres = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0};
    VkSubresourceLayout layout;

    const skgpu::VulkanInterface* interface = this->vkInterface();
    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    texImage->image(),
                                                    &subres,
                                                    &layout));

    const skgpu::VulkanAlloc& alloc = texImage->alloc();
    VkDeviceSize offset = rect.top()*layout.rowPitch + rect.left()*bpp;
    auto checkResult = [this](VkResult result) { return this->checkVkResult(result); };
    void* mapPtr = skgpu::VulkanMemory::MapAlloc(this->memoryAllocator(), alloc, checkResult);
    if (!mapPtr) {
        return false;
    }
    mapPtr = reinterpret_cast<char*>(mapPtr) + offset;

    SkRectMemcpy(mapPtr,
                 static_cast<size_t>(layout.rowPitch),
                 data,
                 rowBytes,
                 trimRowBytes,
                 rect.height());
    // ... (FlushMappedAlloc + UnmapAlloc)
    return true;
}
static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
                                         TArray<VkBufferImageCopy>* regions,
                                         TArray<size_t>* individualMipOffsets,
                                         GrStagingBufferManager::Slice* slice,
                                         SkTextureCompressionType compression,
                                         VkFormat vkFormat,
                                         SkISize dimensions,
                                         skgpu::Mipmapped mipmapped) {
    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    regions->reserve_exact(regions->size() + numMipLevels);
    individualMipOffsets->reserve_exact(individualMipOffsets->size() + numMipLevels);

    size_t bytesPerBlock = skgpu::VkFormatBytesPerBlock(vkFormat);

    size_t combinedBufferSize = SkCompressedDataSize(compression,
                                                     dimensions,
                                                     individualMipOffsets,
                                                     mipmapped == skgpu::Mipmapped::kYes);

    // Vulkan requires the offset into the staging buffer to be a multiple of both the
    // texel block size and 4.
    size_t alignment = bytesPerBlock;
    switch (alignment & 0b11) {
        case 0:                  break;   // alignment is already a multiple of 4.
        case 2: alignment *= 2;  break;   // alignment is a multiple of 2 but not 4.
        default: alignment *= 4; break;   // alignment is not a multiple of 2.
    }
    *slice = stagingBufferManager->allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice->fBuffer) {
        return 0;
    }

    for (int i = 0; i < numMipLevels; ++i) {
        VkBufferImageCopy& region = regions->push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
        SkISize revisedDimensions = skgpu::CompressedDimensions(compression, dimensions);
        region.bufferRowLength = revisedDimensions.width();
        region.bufferImageHeight = revisedDimensions.height();
        region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
        region.imageOffset = {0, 0, 0};
        region.imageExtent = {SkToU32(dimensions.width()), SkToU32(dimensions.height()), 1};

        dimensions = {std::max(1, dimensions.width() / 2),
                      std::max(1, dimensions.height() / 2)};
    }

    return combinedBufferSize;
}
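// Worked example for the alignment switch above (editor's addition, values
// hypothetical): a 2-byte block becomes alignment 4, a 3-byte block becomes 12,
// and an 8-byte block stays 8 -- in each case the smallest multiple of
// bytesPerBlock that is also a multiple of 4, satisfying both of Vulkan's
// bufferOffset rules at once.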
bool GrVkGpu::uploadTexDataOptimal(GrVkImage* texImage,
                                   SkIRect rect,
                                   GrColorType dataColorType,
                                   const GrMipLevel texels[],
                                   int mipLevelCount) {
    // ...
    SkASSERT(1 == mipLevelCount || rect == SkIRect::MakeSize(texImage->dimensions()));

    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    // Make a shallow copy so per-level pointers can be adjusted without touching the input.
    std::unique_ptr<GrMipLevel[]> texelsShallowCopy(new GrMipLevel[mipLevelCount]);
    std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());

    TArray<size_t> individualMipOffsets;
    size_t combinedBufferSize;
    if (mipLevelCount > 1) {
        combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
                                                              rect.size(),
                                                              &individualMipOffsets,
                                                              mipLevelCount);
    } else {
        SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
        combinedBufferSize = rect.width()*rect.height()*bpp;
        individualMipOffsets.push_back(0);
    }
    SkASSERT(combinedBufferSize);

    // Vulkan requires the offset into the staging buffer to be a multiple of both the
    // texel size and 4.
    size_t alignment = bpp;
    switch (alignment & 0b11) {
        case 0:                  break;   // alignment is already a multiple of 4.
        case 2: alignment *= 2;  break;   // alignment is a multiple of 2 but not 4.
        default: alignment *= 4; break;   // alignment is not a multiple of 2.
    }
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }

    int uploadLeft = rect.left();
    int uploadTop = rect.top();

    char* buffer = (char*)slice.fOffsetMapPtr;
    TArray<VkBufferImageCopy> regions(mipLevelCount);

    int currentWidth = rect.width();
    int currentHeight = rect.height();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping any trailing bytes in each row
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
            region.imageOffset = {uploadLeft, uploadTop, 0};
            region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
        }

        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }
    // ... (transition the image to TRANSFER_DST_OPTIMAL and record copyBufferToImage)
    return true;
}
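// Note (editor's addition): the halving at the bottom of the loop walks the mip
// chain; std::max(1, ...) keeps non-square chains valid once the short axis
// reaches one texel (e.g. 8x2 -> 4x1 -> 2x1 -> 1x1).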
bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
                                      SkTextureCompressionType compression,
                                      VkFormat vkFormat,
                                      SkISize dimensions,
                                      skgpu::Mipmapped mipmapped,
                                      const void* data,
                                      size_t dataSize) {
    // ...
    GrStagingBufferManager::Slice slice;
    TArray<VkBufferImageCopy> regions;
    TArray<size_t> individualMipOffsets;
    size_t combinedBufferSize = fill_in_compressed_regions(&fStagingBufferManager,
                                                           &regions,
                                                           &individualMipOffsets,
                                                           &slice,
                                                           compression,
                                                           vkFormat,
                                                           dimensions,
                                                           mipmapped);
    if (!slice.fBuffer) {
        return false;
    }
    SkASSERT(dataSize == combinedBufferSize);
    // ... (memcpy `data` into the slice and record copyBufferToImage)
    return true;
}
sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
                                          const GrBackendFormat& format,
                                          GrRenderable renderable,
                                          int renderTargetSampleCnt,
                                          skgpu::Budgeted budgeted,
                                          GrProtected isProtected,
                                          int mipLevelCount,
                                          uint32_t levelClearMask,
                                          std::string_view label) {
    // ...
    sk_sp<GrVkTexture> tex;
    if (renderable == GrRenderable::kYes) {
        tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
                mipmapStatus, isProtected, label);
    } else {
        tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
                                          mipLevelCount, isProtected, mipmapStatus, label);
    }
    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        if (!this->currentCommandBuffer()) {
            return nullptr;
        }
        // Run-length encode consecutive set bits of levelClearMask into subresource ranges.
        STArray<1, VkImageSubresourceRange> ranges;
        bool inRange = false;
        GrVkImage* texImage = tex->textureImage();
        for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
            if (levelClearMask & (1U << i)) {
                if (inRange) {
                    ranges.back().levelCount++;
                } else {
                    VkImageSubresourceRange& range = ranges.push_back();
                    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
                    range.baseArrayLayer = 0;
                    range.baseMipLevel = i;
                    range.layerCount = 1;
                    range.levelCount = 1;
                    inRange = true;
                }
            } else if (inRange) {
                inRange = false;
            }
        }
        // ... (transition to TRANSFER_DST and clearColorImage over `ranges`)
    }
    return std::move(tex);
}
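// Note (editor's addition): the inRange flag merges runs of set bits in
// levelClearMask into as few VkImageSubresourceRanges as possible; e.g. a
// hypothetical mask of 0b0111 clears levels 0-2 with one range of levelCount 3.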
sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
                                                    const GrBackendFormat& format,
                                                    skgpu::Budgeted budgeted,
                                                    skgpu::Mipmapped mipmapped,
                                                    GrProtected isProtected,
                                                    const void* data,
                                                    size_t dataSize) {
    // ...
    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }
    // ...
    auto tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
                                           numMipLevels, isProtected, mipmapStatus,
                                           "VkGpu_CreateCompressedTexture");
    if (!tex) {
        return nullptr;
    }

    SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
    if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
                                       dimensions, mipmapped, data, dataSize)) {
        return nullptr;
    }
    return std::move(tex);
}
static bool check_image_info(const GrVkCaps& caps,
                             const GrVkImageInfo& info,
                             bool needsAllocation,
                             uint32_t graphicsQueueIndex) {
    if (VK_NULL_HANDLE == info.fImage) {
        return false;
    }
    // ...
    if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (info.fCurrentQueueFamily != graphicsQueueIndex) {
            return false;
        }
    }

    if (info.fYcbcrConversionInfo.isValid()) {
        if (!caps.supportsYcbcrConversion()) {
            return false;
        }
        if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
            return true;
        }
    }
    // ... (validate format, tiling, and usage flags)
    return true;
}

static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
    // We don't support directly importing multisampled textures for sampling from shaders.
    if (info.fSampleCount != 1) {
        return false;
    }

    if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
        return true;
    }
    // ...
    return true;
}
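// Note (editor's addition): a nonzero fExternalFormat short-circuits the
// remaining checks because such images -- typically imported Android hardware
// buffers -- are only ever sampled through their VkSamplerYcbcrConversion and
// never take the regular VkFormat validation path.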
sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                               GrWrapOwnership ownership,
                                               GrWrapCacheable cacheable,
                                               GrIOType ioType) {
    GrVkImageInfo imageInfo;
    // ... (fetch and validate the GrVkImageInfo via check_image_info/check_tex_image_info)
    sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
    SkASSERT(mutableState);
    return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
                                           ioType, imageInfo, std::move(mutableState));
}

sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
}

sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                         int sampleCnt,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    GrVkImageInfo imageInfo;
    // ...
    // With sampleCnt > 1 Skia allocates its own MSAA attachment, so the wrapped
    // image itself only ever serves as the resolve target.
    bool resolveOnly = sampleCnt > 1;
    // ... (validate with check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly))
    sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
    SkASSERT(mutableState);
    return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(
            this, backendTex.dimensions(), sampleCnt, ownership, cacheable,
            imageInfo, std::move(mutableState));
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    GrVkImageInfo info;
    // ...
    static bool kResolveOnly = false;
    if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
        return nullptr;
    }
    sk_sp<GrRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(/* ... */);
    SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
    return tgt;
}
sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
        const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
    int maxSize = this->caps()->maxTextureSize();
    if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
        return nullptr;
    }

    GrBackendFormat backendFormat = GrBackendFormats::MakeVk(vkInfo.fFormat);
    if (!backendFormat.isValid()) {
        return nullptr;
    }
    // ...
    return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
}
bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
    // don't do anything for linearly tiled textures (can't have mipmaps)
    if (vkTex->isLinearTiled()) {
        SkDebugf("Trying to create mipmap for linear tiled texture");
        return false;
    }
    SkASSERT(tex->textureType() == GrTextureType::k2D);

    // determine if we can blit to and from this format
    const GrVkCaps& caps = this->vkCaps();
    if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
        !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
        !caps.mipmapSupport()) {
        return false;
    }

    int width = tex->width();
    int height = tex->height();
    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));

    // SkMipmap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
    SkASSERT(levelCount == vkTex->mipLevels());
    // ... (transition the whole image to TRANSFER_DST, then walk the chain)

    uint32_t mipLevel = 1;
    while (mipLevel < levelCount) {
        int prevWidth = width;
        int prevHeight = height;
        width = std::max(1, width / 2);
        height = std::max(1, height / 2);
        // ... (barrier level mipLevel - 1 to TRANSFER_SRC before blitting from it)

        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };
        // ... (record blitImage with linear filtering)
        ++mipLevel;
    }
    if (levelCount > 1) {
        // ... (transition the last written level so it can be sampled)
    }
    return true;
}
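// Note (editor's addition): srcOffsets[1]/dstOffsets[1] name the exclusive far
// corner of each blit region, so every level is generated by a filtered blit
// from the full extent of the level directly above it.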
sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                   SkISize dimensions, int numStencilSamples) {
    VkFormat sFmt = this->vkCaps().preferredStencilFormat();

    fStats.incStencilAttachmentCreates();
    return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
}

sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                int numSamples,
                                                GrProtected isProtected,
                                                GrMemoryless memoryless) {
    VkFormat pixelFormat;
    SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
    SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat));
    SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));

    fStats.incMSAAAttachmentCreates();
    return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
}
bool copy_src_data(char* mapPtr,
                   VkFormat vkFormat,
                   const TArray<size_t>& individualMipOffsets,
                   const GrPixmap srcData[],
                   int numMipLevels) {
    SkASSERT(srcData && numMipLevels);
    SkASSERT(!skgpu::VkFormatIsCompressed(vkFormat));
    SkASSERT(mapPtr);

    size_t bytesPerPixel = skgpu::VkFormatBytesPerBlock(vkFormat);

    for (int level = 0; level < numMipLevels; ++level) {
        const size_t trimRB = srcData[level].info().width() * bytesPerPixel;

        SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
                     srcData[level].addr(), srcData[level].rowBytes(),
                     trimRB, srcData[level].height());
    }
    return true;
}
bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
                                             SkISize dimensions,
                                             int sampleCnt,
                                             GrTexturable texturable,
                                             GrRenderable renderable,
                                             skgpu::Mipmapped mipmapped,
                                             GrVkImageInfo* info,
                                             GrProtected isProtected) {
    SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);

    if (fProtectedContext != isProtected) {
        return false;
    }
    // ... (derive usage flags from texturable/renderable)

    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = vkFormat;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = numMipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    imageDesc.fIsProtected = isProtected;

    if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
        SkDebugf("Failed to init image info\n");
        return false;
    }
    return true;
}
bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
                                    sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                    std::array<float, 4> color) {
    // ... (wrap the texture's image, transition it, and record clearColorImage(color))
    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}
GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
                                                 const GrBackendFormat& format,
                                                 GrRenderable renderable,
                                                 skgpu::Mipmapped mipmapped,
                                                 GrProtected isProtected,
                                                 std::string_view label) {
    const GrVkCaps& caps = this->vkCaps();

    if (fProtectedContext != isProtected) {
        return {};
    }

    VkFormat vkFormat;
    if (!GrBackendFormats::AsVkFormat(format, &vkFormat)) {
        return {};
    }

    if (!caps.isVkFormatTexturable(vkFormat)) {
        return {};
    }
    // ...
    GrVkImageInfo info;
    if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
                                              renderable, mipmapped, &info, isProtected)) {
        return {};
    }

    return GrBackendTextures::MakeVk(dimensions.width(), dimensions.height(), info, label);
}

GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(SkISize dimensions,
                                                           const GrBackendFormat& format,
                                                           skgpu::Mipmapped mipmapped,
                                                           GrProtected isProtected) {
    return this->onCreateBackendTexture(dimensions,
                                        format,
                                        GrRenderable::kNo,
                                        mipmapped,
                                        isProtected,
                                        "VkGpu_CreateCompressedBackendTexture");
}
bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                               sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                               const void* data,
                                               size_t size) {
    GrVkImageInfo info;
    SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));

    sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
    SkASSERT(mutableState);
    sk_sp<GrVkImage> image = GrVkImage::MakeWrapped(this,
                                                    backendTexture.dimensions(),
                                                    info,
                                                    std::move(mutableState),
                                                    /* ... usage, ownership, cacheable, label */);
    if (!image) {
        return false;
    }
    // ...
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                          VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    // ... (stage the compressed data)
    fill_in_compressed_regions(&fStagingBufferManager,
                               &regions,
                               &individualMipOffsets,
                               &slice,
                               compression,
                               info.fFormat,
                               backendTexture.dimensions(),
                               backendTexture.fMipmapped);
    // ...
    cmdBuffer->copyBufferToImage(this,
                                 vkBuffer->vkBuffer(),
                                 image.get(),
                                 image->currentLayout(),
                                 regions.size(),
                                 regions.begin());

    // Transition back to SHADER_READ_ONLY so the texture can be sampled when Skia borrows it.
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                          VK_ACCESS_SHADER_READ_BIT,
                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                          false);

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}
void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
                                             VkImageLayout newLayout,
                                             uint32_t newQueueFamilyIndex) {
    if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
        // A requested layout of UNDEFINED means "leave the layout alone".
        newLayout = image->currentLayout();
    }
    VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
    VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);

    uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
    auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
        return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
               queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
    };
    if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
        // It is illegal for both the old and new queue to be special queue families
        // (i.e. external or foreign).
        return;
    }

    image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
                                       newQueueFamilyIndex);
}
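// Note (editor's addition): VK_QUEUE_FAMILY_EXTERNAL and
// VK_QUEUE_FAMILY_FOREIGN_EXT are the "special" families matched by
// isSpecialQueue; transferring directly between two of them is rejected,
// presumably because no Skia-owned queue could record the matching
// release/acquire barriers for such a handoff.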
bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
                                     sk_sp<skgpu::MutableTextureState> currentState,
                                     SkISize dimensions,
                                     VkImageLayout newLayout,
                                     uint32_t newQueueFamilyIndex,
                                     skgpu::MutableTextureState* previousState,
                                     sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    sk_sp<GrVkImage> texture = GrVkImage::MakeWrapped(this,
                                                      dimensions,
                                                      info,
                                                      std::move(currentState),
                                                      GrVkImage::UsageFlags::kColorAttachment,
                                                      kBorrow_GrWrapOwnership,
                                                      GrWrapCacheable::kNo,
                                                      "VkGpu_SetBackendSurfaceState",
                                                      /*forSecondaryCB=*/false);
    SkASSERT(texture);
    if (!texture) {
        return false;
    }
    if (previousState) {
        previousState->set(*texture->getMutableState());
    }
    set_layout_and_queue_from_mutable_state(this, texture.get(), newLayout, newQueueFamilyIndex);
    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}

bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
                                     const skgpu::MutableTextureState& newState,
                                     skgpu::MutableTextureState* previousState,
                                     sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    GrVkImageInfo info;
    SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));
    sk_sp<skgpu::MutableTextureState> currentState = backendTexture.getMutableState();
    SkASSERT(currentState);
    SkASSERT(newState.backend() == skgpu::BackendApi::kVulkan);
    return this->setBackendSurfaceState(info, std::move(currentState),
                                        backendTexture.dimensions(),
                                        skgpu::MutableTextureStates::GetVkImageLayout(newState),
                                        skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState),
                                        previousState,
                                        std::move(finishedCallback));
}

bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
                                          const skgpu::MutableTextureState& newState,
                                          skgpu::MutableTextureState* previousState,
                                          sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    GrVkImageInfo info;
    SkAssertResult(GrBackendRenderTargets::GetVkImageInfo(backendRenderTarget, &info));
    sk_sp<skgpu::MutableTextureState> currentState = backendRenderTarget.getMutableState();
    SkASSERT(currentState);
    return this->setBackendSurfaceState(info, std::move(currentState),
                                        backendRenderTarget.dimensions(),
                                        skgpu::MutableTextureStates::GetVkImageLayout(newState),
                                        skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState),
                                        previousState, std::move(finishedCallback));
}
void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
    GrVkImage* image = static_cast<GrVkRenderTarget*>(rt)->colorAttachment();
    VkPipelineStageFlags dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    VkAccessFlags dstAccess = (barrierType == kBlend_GrXferBarrierType)
                                      ? VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT
                                      : VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;

    VkImageMemoryBarrier barrier;
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.pNext = nullptr;
    barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    barrier.dstAccessMask = dstAccess;
    // ... (layout stays unchanged; queue families are VK_QUEUE_FAMILY_IGNORED)
    this->addImageMemoryBarrier(image->resource(),
                                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                dstStage, /*byRegion=*/true, &barrier);
}

bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
    // ...
    GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
    GrVkRenderPass::AttachmentFlags attachmentFlags;
    GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
                                                       &attachmentsDescriptor, &attachmentFlags);

    SelfDependencyFlags selfDepFlags = SelfDependencyFlags::kNone;
    if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
        selfDepFlags |= SelfDependencyFlags::kForNonCoherentAdvBlend;
    }

    GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
    if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
        programInfo.colorLoadOp() == GrLoadOp::kLoad) {
        loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
    }
    sk_sp<const GrVkRenderPass> renderPass(fResourceProvider.findCompatibleRenderPass(
            &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
    if (!renderPass) {
        return false;
    }
    // ...
    auto pipelineState = fResourceProvider.findOrCreateCompatiblePipelineState(/* ... */);
    if (!pipelineState) {
        return false;
    }
    return true;
}
#if defined(GR_TEST_UTILS)
bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
    SkASSERT(GrBackendApi::kVulkan == tex.backend());

    GrVkImageInfo backend;
    if (!GrBackendTextures::GetVkImageInfo(tex, &backend)) {
        return false;
    }

    if (backend.fImage && backend.fAlloc.fMemory) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend.fImage,
                                                                   &req));
        // TODO: find a better check
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}
GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                                    GrColorType ct,
                                                                    int sampleCnt,
                                                                    GrProtected isProtected) {
    if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
        dimensions.height() > this->caps()->maxRenderTargetSize()) {
        return {};
    }

    VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);

    GrVkImageInfo info;
    if (!this->createVkImageForBackendSurface(vkFormat,
                                              dimensions,
                                              sampleCnt,
                                              GrTexturable::kNo,
                                              GrRenderable::kYes,
                                              skgpu::Mipmapped::kNo,
                                              &info,
                                              isProtected)) {
        return {};
    }
    return GrBackendRenderTargets::MakeVk(dimensions.width(), dimensions.height(), info);
}
void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
        SkSpan<GrSurfaceProxy*> proxies,
        SkSurfaces::BackendSurfaceAccess access,
        const skgpu::MutableTextureState* newState) {
    // ...
    for (GrSurfaceProxy* proxy : proxies) {
        SkASSERT(proxy->isInstantiated());
        GrVkImage* image = nullptr;
        if (GrTexture* tex = proxy->peekTexture()) {
            image = static_cast<GrVkTexture*>(tex)->textureImage();
        } else {
            GrRenderTarget* rt = proxy->peekRenderTarget();
            SkASSERT(rt);
            // ...
        }
        if (newState) {
            // ... (apply the requested layout/queue via set_layout_and_queue_from_mutable_state)
        } else {
            SkASSERT(access == SkSurfaces::BackendSurfaceAccess::kPresent);
            image->prepareForPresent(this);
        }
    }
}
bool GrVkGpu::onSubmitToGpu(GrSyncCpu sync) {
    if (sync == GrSyncCpu::kYes) {
        return this->submitCommandBuffer(kForce_SyncQueue);
    } else {
        return this->submitCommandBuffer(kSkip_SyncQueue);
    }
}

void GrVkGpu::finishOutstandingGpuWork() {
    VK_CALL(QueueWaitIdle(fQueue));

    if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
        fResourceProvider.forceSyncAllCommandBuffers();
    }
}
void GrVkGpu::onReportSubmitHistograms() {
#if SK_HISTOGRAMS_ENABLED
    uint64_t allocatedMemory = 0, usedMemory = 0;
    std::tie(allocatedMemory, usedMemory) = fMemoryAllocator->totalAllocatedAndUsedMemory();
    SkASSERT(usedMemory <= allocatedMemory);
    if (allocatedMemory > 0) {
        SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
                                (usedMemory * 100) / allocatedMemory);
    }
    // ... (also report the absolute amount allocated via SK_HISTOGRAM_MEMORY_KB)
#endif  // SK_HISTOGRAMS_ENABLED
}
void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     GrVkImage* dstImage,
                                     GrVkImage* srcImage,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
#ifdef SK_DEBUG
    // ...
    SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
                                         srcFormat, srcSampleCnt, srcHasYcbcr));
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected");
        return;
    }
    // ... (transition both images into TRANSFER layouts)

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
    copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };

    this->currentCommandBuffer()->copyImage(this,
                                            srcImage,
                                            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                            dstImage,
                                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                            1,
                                            &copyRegion);
    // ...
}
void GrVkGpu::copySurfaceAsBlit(/* dst, src, their images, rects, filter */) {
    // ...
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected");
        return;
    }
    // ... (transition both images and record blitImage with the requested filter)
}

void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                                   const SkIPoint& dstPoint) {
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected");
        return;
    }
    GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
    this->resolveImage(dst, srcRT, srcRect, dstPoint);
    // ... (mark dst as written)
}
bool GrVkGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
                            GrSurface* src, const SkIRect& srcRect,
                            GrSamplerState::Filter filter) {
#ifdef SK_DEBUG
    if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
        SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
    }
    if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
        SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
    }
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected");
        return false;
    }

    GrVkImage* dstImage;
    GrVkImage* srcImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        // ... (use the render target's single-sample attachment)
    } else if (dst->asTexture()) {
        dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage();
    } else {
        // The surface is a GrAttachment already.
        dstImage = static_cast<GrVkImage*>(dst);
    }
    GrRenderTarget* srcRT = src->asRenderTarget();
    if (srcRT) {
        // ...
    } else if (src->asTexture()) {
        srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
    } else {
        srcImage = static_cast<GrVkImage*>(src);
    }
    // ... (gather formats, sample counts, and ycbcr-ness of both images)

    if (srcRect.size() == dstRect.size()) {
        // Prefer resolves or copy-image commands when there is no scaling.
        const SkIPoint dstPoint = dstRect.topLeft();
        if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
                                            srcFormat, srcSampleCnt, srcHasYcbcr)) {
            this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
            return true;
        }

        if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
                                        srcFormat, srcSampleCnt, srcHasYcbcr)) {
            this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
            return true;
        }
    }

    if (this->vkCaps().canCopyAsBlit(dstFormat,
                                     dstSampleCnt,
                                     dstImage->isLinearTiled(),
                                     dstHasYcbcr,
                                     srcFormat,
                                     srcSampleCnt,
                                     srcImage->isLinearTiled(),
                                     srcHasYcbcr)) {
        this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstRect, filter);
        return true;
    }

    return false;
}
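// Note (editor's addition) on strategy order: a resolve is the only option when
// the source is multisampled, vkCmdCopyImage is the cheapest exact copy, and
// vkCmdBlitImage is the fallback that also handles scaling (srcRect != dstRect)
// at the cost of filtering.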
bool GrVkGpu::onReadPixels(GrSurface* surface,
                           SkIRect rect,
                           GrColorType surfaceColorType,
                           GrColorType dstColorType,
                           void* buffer,
                           size_t rowBytes) {
    // ... (resolve the source image and validate the transfer color type)
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                          VK_ACCESS_TRANSFER_READ_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
    size_t tightRowBytes = bpp*rect.width();

    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.imageOffset = { rect.left(), rect.top(), 0 };
    region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };

    size_t transBufferRowBytes = bpp * region.imageExtent.width;
    size_t imageRows = region.imageExtent.height;
    GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
    sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
            transBufferRowBytes * imageRows,
            GrGpuBufferType::kXferGpuToCpu,
            kDynamic_GrAccessPattern,
            GrResourceProvider::ZeroInit::kNo);
    if (!transferBuffer) {
        return false;
    }

    region.bufferOffset = 0;
    region.bufferRowLength = 0;  // Forces RowLength to be width. We handle the rowBytes below.
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    // ... (record copyImageToBuffer into transferBuffer)

    // We need to submit the current command buffer to the Queue and make sure it finishes
    // before we can copy the data out of the buffer.
    if (!this->submitCommandBuffer(kForce_SyncQueue)) {
        return false;
    }
    void* mappedMemory = transferBuffer->map();
    if (!mappedMemory) {
        return false;
    }
    SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());

    transferBuffer->unmap();
    return true;
}
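// Note (editor's addition): bufferRowLength = 0 makes the copy tightly packed
// at imageExtent.width texels per row; the caller's possibly larger rowBytes
// stride is then applied on the CPU side by SkRectMemcpy.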
bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
                              sk_sp<const GrVkFramebuffer> framebuffer,
                              const VkClearValue* colorClear,
                              const GrSurface* target,
                              const SkIRect& renderPassBounds,
                              bool forSecondaryCB) {
    // ... (assemble the clear values expected by the render pass)
    return this->currentCommandBuffer()->beginRenderPass(
            this, renderPass, std::move(framebuffer), clears, target, renderPassBounds,
            forSecondaryCB);
}
bool GrVkGpu::checkVkResult(VkResult result) {
    switch (result) {
        case VK_SUCCESS:
            return true;
        case VK_ERROR_DEVICE_LOST:
            if (!fDeviceIsLost) {
                // Only fire the device-lost callback on the first observed loss.
                fDeviceIsLost = true;
                skgpu::InvokeDeviceLostCallback(this->vkInterface(),
                                                fDevice,
                                                fDeviceLostContext,
                                                fDeviceLostProc,
                                                this->vkCaps().supportsDeviceFaultInfo());
            }
            return false;
        case VK_ERROR_OUT_OF_DEVICE_MEMORY:
        case VK_ERROR_OUT_OF_HOST_MEMORY:
            this->setOOMed();
            return false;
        default:
            return false;
    }
}
void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
    SkASSERT(fCachedOpsRenderPass.get() == renderPass);

    fCachedOpsRenderPass->submit();
    fCachedOpsRenderPass->reset();
}

std::unique_ptr<GrSemaphore> GrVkGpu::makeSemaphore(bool isOwned) {
    return GrVkSemaphore::Make(this, isOwned);
}

std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                           GrSemaphoreWrapType wrapType,
                                                           GrWrapOwnership ownership) {
    return GrVkSemaphore::MakeWrapped(this, GrBackendSemaphores::GetVkSemaphore(semaphore),
                                      wrapType, ownership);
}