GrVkResourceProvider.cpp
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"

#include "include/gpu/GrDirectContext.h"
#include "src/core/SkTaskGroup.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrSamplerState.h"
#include "src/gpu/ganesh/GrStencilSettings.h"
#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandPool.h"
#include "src/gpu/ganesh/vk/GrVkDescriptorPool.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkPipeline.h"
#include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
    : fGpu(gpu)
    , fPipelineCache(VK_NULL_HANDLE) {
    fPipelineStateCache = sk_make_sp<PipelineStateCache>(gpu);
}

GrVkResourceProvider::~GrVkResourceProvider() {
    SkASSERT(fRenderPassArray.empty());
    SkASSERT(fExternalRenderPasses.empty());
    SkASSERT(fMSAALoadPipelines.empty());
    SkASSERT(VK_NULL_HANDLE == fPipelineCache);
}

VkPipelineCache GrVkResourceProvider::pipelineCache() {
    if (fPipelineCache == VK_NULL_HANDLE) {
        VkPipelineCacheCreateInfo createInfo;
        memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;

        auto persistentCache = fGpu->getContext()->priv().getPersistentCache();
        sk_sp<SkData> cached;
        if (persistentCache) {
            uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
            sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
            cached = persistentCache->load(*keyData);
        }
        bool usedCached = false;
        if (cached) {
            const uint32_t* cacheHeader = (const uint32_t*)cached->data();
            if (cacheHeader[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE) {
                // For version one of the header, the total header size is 16 bytes plus
                // VK_UUID_SIZE bytes. See Section 9.6 (Pipeline Cache) in the Vulkan spec for
                // the breakdown of these bytes.
                SkASSERT(cacheHeader[0] == 16 + VK_UUID_SIZE);
                const VkPhysicalDeviceProperties& devProps = fGpu->physicalDeviceProperties();
                const uint8_t* supportedPipelineCacheUUID = devProps.pipelineCacheUUID;
                if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
                    !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
                    createInfo.initialDataSize = cached->size();
                    createInfo.pInitialData = cached->data();
                    usedCached = true;
                }
            }
        }
        if (!usedCached) {
            createInfo.initialDataSize = 0;
            createInfo.pInitialData = nullptr;
        }

        VkResult result;
        GR_VK_CALL_RESULT(fGpu, result, CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
                                                            &fPipelineCache));
        if (VK_SUCCESS != result) {
            fPipelineCache = VK_NULL_HANDLE;
        }
    }
    return fPipelineCache;
}

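The header check above guards against feeding the driver a cache blob produced by a different GPU, driver, or driver version. As a standalone illustration (not part of this file; the helper name is this sketch's own), a version-one Vulkan pipeline cache header can be validated like so:

// Sketch: validate a VkPipelineCache blob against the running device.
// Version-one header layout (little-endian uint32 fields, per the Vulkan
// spec's "Pipeline Cache" chapter):
//   [0] total header length = 16 + VK_UUID_SIZE
//   [1] VkPipelineCacheHeaderVersion (VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
//   [2] vendorID    [3] deviceID    [4..] pipelineCacheUUID bytes
#include <cstdint>
#include <cstring>
#include <vulkan/vulkan_core.h>

static bool cacheBlobMatchesDevice(const void* blob, size_t size,
                                   const VkPhysicalDeviceProperties& props) {
    if (size < 16 + VK_UUID_SIZE) {
        return false;  // too small to hold a version-one header
    }
    const uint32_t* header = static_cast<const uint32_t*>(blob);
    return header[0] == 16 + VK_UUID_SIZE &&
           header[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE &&
           header[2] == props.vendorID &&
           header[3] == props.deviceID &&
           0 == memcmp(&header[4], props.pipelineCacheUUID, VK_UUID_SIZE);
}

Unlike the code above, this sketch also bounds-checks the blob size before reading the header; the function above relies on the persistent cache returning a well-formed blob it stored itself.
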
void GrVkResourceProvider::init() {
    // Init uniform descriptor objects
    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(1 == fDescriptorSetManagers.size());
    fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
    dsm = GrVkDescriptorSetManager::CreateInputManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(2 == fDescriptorSetManagers.size());
    fInputDSHandle = GrVkDescriptorSetManager::Handle(1);
}

sk_sp<const GrVkPipeline> GrVkResourceProvider::makePipeline(
        const GrProgramInfo& programInfo,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        int shaderStageCount,
        VkRenderPass compatibleRenderPass,
        VkPipelineLayout layout,
        uint32_t subpass) {
    return GrVkPipeline::Make(fGpu, programInfo, shaderStageInfo, shaderStageCount,
                              compatibleRenderPass, layout, this->pipelineCache(), subpass);
}

// To create framebuffers, we first need to create a simple RenderPass that is
// only used for framebuffer creation. When we actually render we will create
// RenderPasses as needed that are compatible with the framebuffer.
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderTarget* target,
                                               CompatibleRPHandle* compatibleHandle,
                                               bool withResolve,
                                               bool withStencil,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve) {
    // Get attachment information from the render target. This includes which attachments the
    // render target has (color, stencil) and the attachments' format and sample count.
    GrVkRenderPass::AttachmentFlags attachmentFlags;
    GrVkRenderPass::AttachmentsDescriptor attachmentsDesc;
    if (!target->getAttachmentsDescriptor(&attachmentsDesc, &attachmentFlags,
                                          withResolve, withStencil)) {
        return nullptr;
    }

    return this->findCompatibleRenderPass(&attachmentsDesc, attachmentFlags, selfDepFlags,
                                          loadFromResolve, compatibleHandle);
}

const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderPass::AttachmentsDescriptor* desc,
                                               GrVkRenderPass::AttachmentFlags attachmentFlags,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve,
                                               CompatibleRPHandle* compatibleHandle) {
    for (int i = 0; i < fRenderPassArray.size(); ++i) {
        if (fRenderPassArray[i].isCompatible(*desc, attachmentFlags, selfDepFlags,
                                             loadFromResolve)) {
            const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
            renderPass->ref();
            if (compatibleHandle) {
                *compatibleHandle = CompatibleRPHandle(i);
            }
            return renderPass;
        }
    }

    GrVkRenderPass* renderPass = GrVkRenderPass::CreateSimple(fGpu, desc, attachmentFlags,
                                                              selfDepFlags, loadFromResolve);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPassArray.emplace_back(renderPass);

    if (compatibleHandle) {
        *compatibleHandle = CompatibleRPHandle(fRenderPassArray.size() - 1);
    }
    return renderPass;
}

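Callers typically use these lookups in sequence: resolve a handle to a compatible render pass first, then fetch the variant with the exact load/store ops needed for the draw. A hypothetical caller-side sketch; `resourceProvider`, `target`, and the flag/op arguments are assumptions, while the method signatures are the ones defined in this file:

GrVkResourceProvider::CompatibleRPHandle rpHandle;
const GrVkRenderPass* compatibleRP = resourceProvider->findCompatibleRenderPass(
        target, &rpHandle, /*withResolve=*/false, /*withStencil=*/true,
        selfDepFlags, loadFromResolve);
if (compatibleRP) {
    // The handle avoids re-matching attachment descriptors on the second lookup.
    const GrVkRenderPass* exactRP = resourceProvider->findRenderPass(
            rpHandle, colorOps, resolveOps, stencilOps);
    // ... record with exactRP; both passes were ref'ed for the caller and
    // must be unref'ed when no longer needed ...
}
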
const GrVkRenderPass* GrVkResourceProvider::findCompatibleExternalRenderPass(
        VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
    for (int i = 0; i < fExternalRenderPasses.size(); ++i) {
        if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
            fExternalRenderPasses[i]->ref();
#ifdef SK_DEBUG
            uint32_t cachedColorIndex;
            SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
            SkASSERT(cachedColorIndex == colorAttachmentIndex);
#endif
            return fExternalRenderPasses[i];
        }
    }

    const GrVkRenderPass* newRenderPass = new GrVkRenderPass(fGpu, renderPass,
                                                             colorAttachmentIndex);
    fExternalRenderPasses.push_back(newRenderPass);
    newRenderPass->ref();
    return newRenderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
        GrVkRenderTarget* target,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps,
        CompatibleRPHandle* compatibleHandle,
        bool withResolve,
        bool withStencil,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) {
    GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
    GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
                                                                           : &tempRPHandle;
    *pRPHandle = target->compatibleRenderPassHandle(withResolve, withStencil, selfDepFlags,
                                                    loadFromResolve);
    if (!pRPHandle->isValid()) {
        return nullptr;
    }

    return this->findRenderPass(*pRPHandle, colorOps, resolveOps, stencilOps);
}

const GrVkRenderPass*
GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                     const GrVkRenderPass::LoadStoreOps& colorOps,
                                     const GrVkRenderPass::LoadStoreOps& resolveOps,
                                     const GrVkRenderPass::LoadStoreOps& stencilOps) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.size());
    CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
    const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu,
                                                                   colorOps,
                                                                   resolveOps,
                                                                   stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    renderPass->ref();
    return renderPass;
}

GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
        VkDescriptorType type, uint32_t count) {
    return GrVkDescriptorPool::Create(fGpu, type, count);
}

GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
        GrSamplerState params, const skgpu::VulkanYcbcrConversionInfo& ycbcrInfo) {
    GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, ycbcrInfo));
    if (!sampler) {
        sampler = GrVkSampler::Create(fGpu, params, ycbcrInfo);
        if (!sampler) {
            return nullptr;
        }
        fSamplers.add(sampler);
    }
    SkASSERT(sampler);
    sampler->ref();
    return sampler;
}

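This function and the YCbCr-conversion lookup below share the same keyed find-or-create shape: probe a hash table with a generated key, create and insert on a miss, and hand the caller its own ref. A generic sketch of that pattern using std::unordered_map (Skia's actual container is an intrusive hash table keyed by GenerateKey; the class and method names here are illustrative only):

#include <unordered_map>

// T must be ref-counted (ref()/unref()), mirroring GrVkSampler above.
template <typename Key, typename T>
class KeyedCache {
public:
    template <typename CreateFn>
    T* findOrCreate(const Key& key, CreateFn&& create) {
        auto it = fEntries.find(key);
        if (it == fEntries.end()) {
            T* made = create();  // returns with one ref, or nullptr on failure
            if (!made) {
                return nullptr;
            }
            it = fEntries.emplace(key, made).first;  // the cache keeps that ref
        }
        it->second->ref();  // the caller receives an additional ref
        return it->second;
    }

private:
    std::unordered_map<Key, T*> fEntries;
};
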
GrVkSamplerYcbcrConversion* GrVkResourceProvider::findOrCreateCompatibleSamplerYcbcrConversion(
        const skgpu::VulkanYcbcrConversionInfo& ycbcrInfo) {
    GrVkSamplerYcbcrConversion* ycbcrConversion =
            fYcbcrConversions.find(GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo));
    if (!ycbcrConversion) {
        ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
        if (!ycbcrConversion) {
            return nullptr;
        }
        fYcbcrConversions.add(ycbcrConversion);
    }
    SkASSERT(ycbcrConversion);
    ycbcrConversion->ref();
    return ycbcrConversion;
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        GrRenderTarget* renderTarget,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        bool overrideSubpassForResolveLoad) {
    return fPipelineStateCache->findOrCreatePipelineState(renderTarget, programInfo,
                                                          compatibleRenderPass,
                                                          overrideSubpassForResolveLoad);
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        const GrProgramDesc& desc,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        GrThreadSafePipelineBuilder::Stats::ProgramCacheResult* stat) {

    auto tmp = fPipelineStateCache->findOrCreatePipelineState(desc, programInfo,
                                                              compatibleRenderPass, stat);
    if (!tmp) {
        fPipelineStateCache->stats()->incNumPreCompilationFailures();
    } else {
        fPipelineStateCache->stats()->incNumPreProgramCacheResult(*stat);
    }

    return tmp;
}

sk_sp<const GrVkPipeline> GrVkResourceProvider::findOrCreateMSAALoadPipeline(
        const GrVkRenderPass& renderPass,
        int numSamples,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        VkPipelineLayout pipelineLayout) {
    // Find or create a compatible pipeline
    sk_sp<const GrVkPipeline> pipeline;
    for (int i = 0; i < fMSAALoadPipelines.size() && !pipeline; ++i) {
        if (fMSAALoadPipelines[i].fRenderPass->isCompatible(renderPass)) {
            pipeline = fMSAALoadPipelines[i].fPipeline;
        }
    }
    if (!pipeline) {
        pipeline = GrVkPipeline::Make(
                fGpu,
                /*vertexAttribs=*/GrGeometryProcessor::AttributeSet(),
                /*instanceAttribs=*/GrGeometryProcessor::AttributeSet(),
                GrPrimitiveType::kTriangleStrip,
                kTopLeft_GrSurfaceOrigin,
                GrStencilSettings(),
                numSamples,
                /*isHWantialiasState=*/false,
                skgpu::BlendInfo(),
                /*isWireframe=*/false,
                /*useConservativeRaster=*/false,
                /*subpass=*/0,
                shaderStageInfo,
                /*shaderStageCount=*/2,
                renderPass.vkRenderPass(),
                pipelineLayout,
                /*ownsLayout=*/false,
                this->pipelineCache());
        if (!pipeline) {
            return nullptr;
        }
        fMSAALoadPipelines.push_back({pipeline, &renderPass});
    }
    SkASSERT(pipeline);
    return pipeline;
}

void GrVkResourceProvider::getZeroSamplerDescriptorSetHandle(
        GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        if (fDescriptorSetManagers[i]->isZeroSampler()) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateZeroSamplerManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.size() - 1);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const GrVkUniformHandler& uniformHandler,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   uniformHandler);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.size() - 1);
}

VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getInputDSLayout() const {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
        const GrVkDescriptorSetManager::Handle& handle) const {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->layout();
}

const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
                                                                                fUniformDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getInputDescriptorSet() {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->getDescriptorSet(fGpu, fInputDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
        const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
}

void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                                                const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(descSet);
    SkASSERT(handle.isValid());
    int managerIdx = handle.toIndex();
    SkASSERT(managerIdx < fDescriptorSetManagers.size());
    fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
}

GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
    GrVkCommandPool* result;
    if (!fAvailableCommandPools.empty()) {
        result = fAvailableCommandPools.back();
        fAvailableCommandPools.pop_back();
    } else {
        result = GrVkCommandPool::Create(fGpu);
        if (!result) {
            return nullptr;
        }
    }
    SkASSERT(result->unique());
    SkDEBUGCODE(
        for (const GrVkCommandPool* pool : fActiveCommandPools) {
            SkASSERT(pool != result);
        }
        for (const GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool != result);
        }
    )
    fActiveCommandPools.push_back(result);
    result->ref();
    return result;
}

void GrVkResourceProvider::checkCommandBuffers() {
    // When resetting a command buffer it can trigger client-provided procs (e.g. release or
    // finished) to be called. During these calls the client could trigger us to abandon the vk
    // context, e.g. if we are in a DEVICE_LOST state. When we abandon the vk context we will
    // unref all the fActiveCommandPools and reset the array. Since this can happen in the middle
    // of the loop here, we need to additionally check that fActiveCommandPools still has pools on
    // each iteration.
    //
    // TODO: We really need a more robust way to protect ourselves from client proc calls that
    // happen in the middle of us doing work. This may be just one of many potential pitfalls that
    // could happen from the client triggering GrDirectContext changes during a proc call.
    for (int i = fActiveCommandPools.size() - 1; !fActiveCommandPools.empty() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            if (buffer->finished(fGpu)) {
                fActiveCommandPools.removeShuffle(i);
                SkASSERT(pool->unique());
                pool->reset(fGpu);
                // After resetting the pool (specifically releasing the pool's resources) we may
                // have called a client callback proc which may have disconnected the GrVkGpu. In
                // that case we do not want to push the pool back onto the cache, but instead just
                // drop the pool.
                if (fGpu->disconnected()) {
                    pool->unref();
                    return;
                }
                fAvailableCommandPools.push_back(pool);
            }
        }
    }
}

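Two details of this loop are worth calling out: it iterates in reverse so that removeShuffle (which swaps the last element into the vacated slot) never moves an unvisited element into the path of the scan, and it rechecks empty() on every iteration because a reentrant client callback can clear the array out from under it. A minimal sketch of that traversal shape, using std::vector as an assumed stand-in for SkTArray:

#include <utility>
#include <vector>

template <typename T, typename Pred>
void removeFinished(std::vector<T>& items, Pred finished) {
    for (int i = static_cast<int>(items.size()) - 1; !items.empty() && i >= 0; --i) {
        if (finished(items[i])) {
            // removeShuffle equivalent: O(1) removal, order not preserved.
            // The element swapped into slot i came from a higher index that
            // the reverse scan has already visited, so nothing is skipped.
            std::swap(items[i], items.back());
            items.pop_back();
        }
    }
}
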
void GrVkResourceProvider::forceSyncAllCommandBuffers() {
    for (int i = fActiveCommandPools.size() - 1; !fActiveCommandPools.empty() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            buffer->forceSync(fGpu);
        }
    }
}

void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    for (int i = 0; i < fActiveCommandPools.size(); ++i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
        buffer->addFinishedProc(finishedCallback);
    }
}

void GrVkResourceProvider::destroyResources() {
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Release all MSAA load pipelines
    fMSAALoadPipelines.clear();

    // Loop over all render pass sets to make sure we destroy all the internal VkRenderPasses
    for (int i = 0; i < fRenderPassArray.size(); ++i) {
        fRenderPassArray[i].releaseResources();
    }
    fRenderPassArray.clear();

    for (int i = 0; i < fExternalRenderPasses.size(); ++i) {
        fExternalRenderPasses[i]->unref();
    }
    fExternalRenderPasses.clear();

    // Iterate through all stored GrVkSamplers and unref them before resetting the hash table.
    fSamplers.foreach([&](auto* elt) { elt->unref(); });
    fSamplers.reset();

    fYcbcrConversions.foreach([&](auto* elt) { elt->unref(); });
    fYcbcrConversions.reset();

    fPipelineStateCache->release();

    GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
    fPipelineCache = VK_NULL_HANDLE;

    for (GrVkCommandPool* pool : fActiveCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fActiveCommandPools.clear();

    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fAvailableCommandPools.clear();

    // We must release/destroy all command buffers and pipeline states before releasing the
    // GrVkDescriptorSetManagers. Additionally, we must release all uniform buffers since they hold
    // refs to GrVkDescriptorSets.
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        fDescriptorSetManagers[i]->release(fGpu);
    }
    fDescriptorSetManagers.clear();
}

void GrVkResourceProvider::releaseUnlockedBackendObjects() {
    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fAvailableCommandPools.clear();
}

void GrVkResourceProvider::storePipelineCacheData() {
    if (this->pipelineCache() == VK_NULL_HANDLE) {
        return;
    }
    size_t dataSize = 0;
    VkResult result;
    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, nullptr));
    if (result != VK_SUCCESS) {
        return;
    }

    std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);

    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, (void*)data.get()));
    if (result != VK_SUCCESS) {
        return;
    }

    uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
    sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));

    fGpu->getContext()->priv().getPersistentCache()->store(
            *keyData, *SkData::MakeWithoutCopy(data.get(), dataSize), SkString("VkPipelineCache"));
}

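The blob stored here lands in whatever GrContextOptions::PersistentCache the client installed. A minimal in-memory sketch of that interface (the load/store virtuals are Skia's API; the class name and map-backed storage are this example's own):

#include <cstdint>
#include <map>
#include <vector>

#include "include/core/SkData.h"
#include "include/gpu/GrContextOptions.h"

class InMemoryPersistentCache : public GrContextOptions::PersistentCache {
public:
    sk_sp<SkData> load(const SkData& key) override {
        auto it = fBlobs.find(asVec(key));
        if (it == fBlobs.end()) {
            return nullptr;
        }
        return SkData::MakeWithCopy(it->second.data(), it->second.size());
    }
    void store(const SkData& key, const SkData& data) override {
        // For this file's blob, key is kPipelineCache_PersistentCacheKeyType.
        fBlobs[asVec(key)] = asVec(data);
    }

private:
    static std::vector<uint8_t> asVec(const SkData& d) {
        const uint8_t* p = static_cast<const uint8_t*>(d.data());
        return std::vector<uint8_t>(p, p + d.size());
    }
    std::map<std::vector<uint8_t>, std::vector<uint8_t>> fBlobs;
};

An instance would be set on GrContextOptions::fPersistentCache before creating the GrDirectContext; the three-argument store overload called above has a default implementation that forwards to the two-argument one sketched here.
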
////////////////////////////////////////////////////////////////////////////////

GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(GrVkRenderPass* renderPass)
        : fLastReturnedIndex(0) {
    renderPass->ref();
    fRenderPasses.push_back(renderPass);
}

bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
        const GrVkRenderPass::AttachmentsDescriptor& attachmentsDescriptor,
        GrVkRenderPass::AttachmentFlags attachmentFlags,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) const {
    // The first GrVkRenderPass should always exist since we create the basic load/store
    // render pass at creation time.
    SkASSERT(fRenderPasses[0]);
    return fRenderPasses[0]->isCompatible(attachmentsDescriptor, attachmentFlags, selfDepFlags,
                                          loadFromResolve);
}

GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
        GrVkGpu* gpu,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps) {
    for (int i = 0; i < fRenderPasses.size(); ++i) {
        // Start the scan at the most recently returned index, since consecutive
        // lookups tend to want the same load/store variant.
        int idx = (i + fLastReturnedIndex) % fRenderPasses.size();
        if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, resolveOps, stencilOps)) {
            fLastReturnedIndex = idx;
            return fRenderPasses[idx];
        }
    }
    GrVkRenderPass* renderPass = GrVkRenderPass::Create(gpu, *this->getCompatibleRenderPass(),
                                                        colorOps, resolveOps, stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPasses.push_back(renderPass);
    fLastReturnedIndex = fRenderPasses.size() - 1;
    return renderPass;
}

void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources() {
    for (int i = 0; i < fRenderPasses.size(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unref();
            fRenderPasses[i] = nullptr;
        }
    }
}