    fPipelineStateCache = sk_make_sp<PipelineStateCache>(gpu);
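
// pipelineCache() lazily creates the VkPipelineCache on first use and, when the
// client supplied a GrContextOptions::PersistentCache, tries to seed it from a
// previously stored blob (validated by the header check below).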
VkPipelineCache GrVkResourceProvider::pipelineCache() {
    createInfo.pNext = nullptr;
    if (persistentCache) {
        cached = persistentCache->load(*keyData);
    bool usedCached = false;
        const uint32_t* cacheHeader = (const uint32_t*)cached->data();
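        // Per the Vulkan spec, VK_PIPELINE_CACHE_HEADER_VERSION_ONE data begins with
        // a 32-byte header: word 0 holds the header length, word 1 the header
        // version, word 2 the vendorID, word 3 the deviceID, and words 4-7 the
        // 16-byte pipelineCacheUUID. Only a blob recorded for this exact device
        // may be reused.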
        if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
            !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
    return fPipelineCache;
    fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
    fInputDSHandle = GrVkDescriptorSetManager::Handle(1);
        VkRenderPass compatibleRenderPass,
        VkPipelineLayout layout,
                              compatibleRenderPass, layout, this->pipelineCache(), subpass);
        CompatibleRPHandle* compatibleHandle,
    if (!target->getAttachmentsDescriptor(&attachmentsDesc, &attachmentFlags,
                                          withResolve, withStencil)) {
                                            loadFromResolve, compatibleHandle);
        CompatibleRPHandle* compatibleHandle) {
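    // Linear search through the existing compatible-render-pass sets; each set
    // groups render passes that are compatible and differ only in load/store ops.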
    for (int i = 0; i < fRenderPassArray.size(); ++i) {
        if (fRenderPassArray[i].isCompatible(*desc, attachmentFlags, selfDepFlags,
                                             loadFromResolve)) {
            const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
            if (compatibleHandle) {
                *compatibleHandle = CompatibleRPHandle(i);
                                                         selfDepFlags, loadFromResolve);
    if (compatibleHandle) {
        *compatibleHandle = CompatibleRPHandle(fRenderPassArray.size() - 1);
        VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
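    // Reuse an already-wrapped external render pass when a compatible one exists.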
    for (int i = 0; i < fExternalRenderPasses.size(); ++i) {
        if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
            fExternalRenderPasses[i]->ref();
            uint32_t cachedColorIndex;
            SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
            SkASSERT(cachedColorIndex == colorAttachmentIndex);
            return fExternalRenderPasses[i];
                                                                 colorAttachmentIndex);
    fExternalRenderPasses.push_back(newRenderPass);
    newRenderPass->ref();
    return newRenderPass;
        CompatibleRPHandle* compatibleHandle,
    GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
    GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
                                                                           : &tempRPHandle;
    *pRPHandle = target->compatibleRenderPassHandle(withResolve, withStencil, selfDepFlags,
                                                    loadFromResolve);
    if (!pRPHandle->isValid()) {
    return this->findRenderPass(*pRPHandle, colorOps, resolveOps, stencilOps);
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.size());
    CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
    const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu, colorOps, resolveOps,
                                                                   stencilOps);
    fSamplers.add(sampler);
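
    // YCbCr conversions follow the same find-or-create pattern as samplers: look
    // the conversion up by key and only create (and cache) a new one on a miss.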
    if (!ycbcrConversion) {
        ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
        if (!ycbcrConversion) {
            return nullptr;
        }
        fYcbcrConversions.add(ycbcrConversion);
    }
    ycbcrConversion->ref();
    return ycbcrConversion;
        VkRenderPass compatibleRenderPass,
        bool overrideSubpassForResolveLoad) {
    return fPipelineStateCache->findOrCreatePipelineState(renderTarget, programInfo,
                                                          compatibleRenderPass,
                                                          overrideSubpassForResolveLoad);
        VkRenderPass compatibleRenderPass,
    auto tmp = fPipelineStateCache->findOrCreatePipelineState(desc, programInfo,
                                                              compatibleRenderPass, stat);
        fPipelineStateCache->stats()->incNumPreCompilationFailures();
        fPipelineStateCache->stats()->incNumPreProgramCacheResult(*stat);
        VkPipelineLayout pipelineLayout) {
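    // MSAA load pipelines are cached alongside the render pass they were built
    // for, so check for a compatible entry before creating a new pipeline.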
    for (int i = 0; i < fMSAALoadPipelines.size() && !pipeline; ++i) {
        if (fMSAALoadPipelines[i].fRenderPass->isCompatible(renderPass)) {
            pipeline = fMSAALoadPipelines[i].fPipeline;
                                      this->pipelineCache());
        fMSAALoadPipelines.push_back({pipeline, &renderPass});
        GrVkDescriptorSetManager::Handle* handle) {
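    // Return the existing zero-sampler descriptor set manager if one has already
    // been created.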
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        if (fDescriptorSetManagers[i]->isZeroSampler()) {
            *handle = GrVkDescriptorSetManager::Handle(i);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.size() - 1);
        GrVkDescriptorSetManager::Handle* handle) {
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.size() - 1);
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();

    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->layout();
        const GrVkDescriptorSetManager::Handle& handle) const {
    return fDescriptorSetManagers[handle.toIndex()]->layout();
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
                                                                                fUniformDSHandle);
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->getDescriptorSet(fGpu, fInputDSHandle);
        const GrVkDescriptorSetManager::Handle& handle) {
    return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
        const GrVkDescriptorSetManager::Handle& handle) {
    int managerIdx = handle.toIndex();
    fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
    if (!fAvailableCommandPools.empty()) {
    fActiveCommandPools.push_back(result);
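
    // Walk the active pools in reverse (and re-check empty()) because finished
    // pools may be removed from fActiveCommandPools during the iteration.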
    for (int i = fActiveCommandPools.size() - 1; !fActiveCommandPools.empty() && i >= 0; --i) {
        if (!pool->isOpen()) {
            if (buffer->finished(fGpu)) {
    for (int i = fActiveCommandPools.size() - 1; !fActiveCommandPools.empty() && i >= 0; --i) {
        if (!pool->isOpen()) {
    for (int i = 0; i < fActiveCommandPools.size(); ++i) {
        buffer->addFinishedProc(finishedCallback);
    fMSAALoadPipelines.clear();
    for (int i = 0; i < fRenderPassArray.size(); ++i) {
        fRenderPassArray[i].releaseResources();
    fRenderPassArray.clear();
    for (int i = 0; i < fExternalRenderPasses.size(); ++i) {
        fExternalRenderPasses[i]->unref();
    fExternalRenderPasses.clear();
    fSamplers.foreach([&](auto* elt) { elt->unref(); });
    fYcbcrConversions.foreach([&](auto* elt) { elt->unref(); });
    fYcbcrConversions.reset();
    fPipelineStateCache->release();
    fActiveCommandPools.clear();
    fAvailableCommandPools.clear();
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        fDescriptorSetManagers[i]->release(fGpu);
    fDescriptorSetManagers.clear();
    fAvailableCommandPools.clear();
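
// storePipelineCacheData() below uses the standard two-call Vulkan idiom:
// query the size of the pipeline cache blob with a null buffer first, then
// allocate a buffer of that size and fetch the actual data into it.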
                                                         &dataSize, nullptr));
    std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);
                                                         &dataSize, (void*)data.get()));
GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(GrVkRenderPass* renderPass)
        : fLastReturnedIndex(0) {
    fRenderPasses.push_back(renderPass);
bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) const {
    return fRenderPasses[0]->isCompatible(attachmentsDescriptor, attachmentFlags, selfDepFlags,
                                          loadFromResolve);
GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
    for (int i = 0; i < fRenderPasses.size(); ++i) {
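        // Search round-robin starting at fLastReturnedIndex so repeated requests
        // for the same load/store ops hit on the first iteration.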
        int idx = (i + fLastReturnedIndex) % fRenderPasses.size();
        if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, resolveOps, stencilOps)) {
            fLastReturnedIndex = idx;
            return fRenderPasses[idx];
                                                             colorOps, resolveOps, stencilOps);
    fRenderPasses.push_back(renderPass);
    fLastReturnedIndex = fRenderPasses.size() - 1;
void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources() {
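    // Unref each render pass and null its slot; the null check below keeps a
    // second releaseResources() call safe.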
    for (int i = 0; i < fRenderPasses.size(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unref();
            fRenderPasses[i] = nullptr;