VulkanCommandBuffer.cpp
/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/vk/VulkanCommandBuffer.h"

#include "src/gpu/DataUtils.h"

using namespace skia_private;

namespace skgpu::graphite {

class VulkanDescriptorSet;

namespace { // anonymous namespace

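// Vulkan caps the size of a single uniform-buffer binding (maxUniformBufferRange).
// Clamp the bound range to whatever remains in the buffer past `offset`, but never
// more than the device limit. For example, with bufferSize=1000, offset=200 and
// maxSize=512, the 800 remaining bytes get clamped to a 512-byte binding.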
uint64_t clamp_ubo_binding_size(const uint64_t& offset,
                                const uint64_t& bufferSize,
                                const uint64_t& maxSize) {
    SkASSERT(offset <= bufferSize);
    auto remainSize = bufferSize - offset;
    return remainSize > maxSize ? maxSize : remainSize;
}

} // anonymous namespace

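// Factory for command buffers. Each VulkanCommandBuffer owns a dedicated
// VkCommandPool from which its single primary command buffer is allocated;
// protected contexts additionally request VK_COMMAND_POOL_CREATE_PROTECTED_BIT.
// If allocating the primary command buffer fails, the freshly created pool is
// destroyed before returning nullptr.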
std::unique_ptr<VulkanCommandBuffer> VulkanCommandBuffer::Make(
        const VulkanSharedContext* sharedContext,
        VulkanResourceProvider* resourceProvider) {
    // Create VkCommandPool
    VkCommandPoolCreateFlags cmdPoolCreateFlags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
    if (sharedContext->isProtected() == Protected::kYes) {
        cmdPoolCreateFlags |= VK_COMMAND_POOL_CREATE_PROTECTED_BIT;
    }

    const VkCommandPoolCreateInfo cmdPoolInfo = {
            VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,  // sType
            nullptr,                                     // pNext
            cmdPoolCreateFlags,                          // CmdPoolCreateFlags
            sharedContext->queueIndex(),                 // queueFamilyIndex
    };
    VkResult result;
    VkCommandPool pool;
    VULKAN_CALL_RESULT(sharedContext,
                       result,
                       CreateCommandPool(sharedContext->device(), &cmdPoolInfo, nullptr, &pool));
    if (result != VK_SUCCESS) {
        return nullptr;
    }

    const VkCommandBufferAllocateInfo cmdInfo = {
            VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,  // sType
            nullptr,                                         // pNext
            pool,                                            // commandPool
            VK_COMMAND_BUFFER_LEVEL_PRIMARY,                 // level
            1                                                // bufferCount
    };

    VkCommandBuffer primaryCmdBuffer;
    VULKAN_CALL_RESULT(
            sharedContext,
            result,
            AllocateCommandBuffers(sharedContext->device(), &cmdInfo, &primaryCmdBuffer));
    if (result != VK_SUCCESS) {
        VULKAN_CALL(sharedContext->interface(),
                    DestroyCommandPool(sharedContext->device(), pool, nullptr));
        return nullptr;
    }

    return std::unique_ptr<VulkanCommandBuffer>(new VulkanCommandBuffer(pool,
                                                                        primaryCmdBuffer,
                                                                        sharedContext,
                                                                        resourceProvider));
}

VulkanCommandBuffer::VulkanCommandBuffer(VkCommandPool pool,
                                         VkCommandBuffer primaryCommandBuffer,
                                         const VulkanSharedContext* sharedContext,
                                         VulkanResourceProvider* resourceProvider)
        : fPool(pool)
        , fPrimaryCommandBuffer(primaryCommandBuffer)
        , fSharedContext(sharedContext)
        , fResourceProvider(resourceProvider) {
    // When making a new command buffer, we automatically begin the command buffer
    this->begin();
}

VulkanCommandBuffer::~VulkanCommandBuffer() {
    if (fActive) {
        // Need to end command buffer before deleting it
        VULKAN_CALL(fSharedContext->interface(), EndCommandBuffer(fPrimaryCommandBuffer));
        fActive = false;
    }

    if (VK_NULL_HANDLE != fSubmitFence) {
        VULKAN_CALL(fSharedContext->interface(),
                    DestroyFence(fSharedContext->device(), fSubmitFence, nullptr));
    }
    // This should delete any command buffers as well.
    VULKAN_CALL(fSharedContext->interface(),
                DestroyCommandPool(fSharedContext->device(), fPool, nullptr));
}

void VulkanCommandBuffer::onResetCommandBuffer() {
    SkASSERT(!fActive);
    VULKAN_CALL_ERRCHECK(fSharedContext, ResetCommandPool(fSharedContext->device(), fPool, 0));
    fActiveGraphicsPipeline = nullptr;
    fBindUniformBuffers = true;
    fBoundIndexBuffer = VK_NULL_HANDLE;
    fBoundIndexBufferOffset = 0;
    fBoundIndirectBuffer = VK_NULL_HANDLE;
    fBoundIndirectBufferOffset = 0;
    fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
    fNumTextureSamplers = 0;
    fUniformBuffersToBind.fill({nullptr, 0});
    for (int i = 0; i < 4; ++i) {
        fCachedBlendConstant[i] = -1.0;
    }
    for (auto& boundInputBuffer : fBoundInputBuffers) {
        boundInputBuffer = VK_NULL_HANDLE;
    }
    for (auto& boundInputOffset : fBoundInputBufferOffsets) {
        boundInputOffset = 0;
    }
}

bool VulkanCommandBuffer::setNewCommandBufferResources() {
    this->begin();
    return true;
}

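// begin()/end() bracket all recording on the primary command buffer. end() is
// also where any still-pending pipeline barriers are flushed, since barriers are
// batched (see pipelineBarrier/submitPipelineBarriers below) rather than being
// recorded immediately.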
void VulkanCommandBuffer::begin() {
    SkASSERT(!fActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    VULKAN_CALL_ERRCHECK(fSharedContext,
                         BeginCommandBuffer(fPrimaryCommandBuffer, &cmdBufferBeginInfo));
    fActive = true;
}

void VulkanCommandBuffer::end() {
    SkASSERT(fActive);
    SkASSERT(!fActiveRenderPass);

    this->submitPipelineBarriers();

    VULKAN_CALL_ERRCHECK(fSharedContext, EndCommandBuffer(fPrimaryCommandBuffer));

    fActive = false;
}

void VulkanCommandBuffer::addWaitSemaphores(size_t numWaitSemaphores,
                                            const BackendSemaphore* waitSemaphores) {
    if (!waitSemaphores) {
        SkASSERT(numWaitSemaphores == 0);
        return;
    }

    for (size_t i = 0; i < numWaitSemaphores; ++i) {
        auto& semaphore = waitSemaphores[i];
        if (semaphore.isValid() && semaphore.backend() == BackendApi::kVulkan) {
            fWaitSemaphores.push_back(semaphore.getVkSemaphore());
        }
    }
}

void VulkanCommandBuffer::addSignalSemaphores(size_t numSignalSemaphores,
                                              const BackendSemaphore* signalSemaphores) {
    if (!signalSemaphores) {
        SkASSERT(numSignalSemaphores == 0);
        return;
    }

    for (size_t i = 0; i < numSignalSemaphores; ++i) {
        auto& semaphore = signalSemaphores[i];
        if (semaphore.isValid() && semaphore.backend() == BackendApi::kVulkan) {
            fSignalSemaphores.push_back(semaphore.getVkSemaphore());
        }
    }
}

void VulkanCommandBuffer::prepareSurfaceForStateUpdate(SkSurface* targetSurface,
                                                       const MutableTextureState* newState) {
    TextureProxy* textureProxy = static_cast<Surface*>(targetSurface)->backingTextureProxy();
    VulkanTexture* texture = static_cast<VulkanTexture*>(textureProxy->texture());

    // Even though internally we use these helpers for getting src access flags and stages, they
    // can also be used for general dst flags since we don't know exactly what the client
    // plans on using the image for.
    VkImageLayout newLayout = skgpu::MutableTextureStates::GetVkImageLayout(newState);
    if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
        newLayout = texture->currentLayout();
    }
    VkPipelineStageFlags dstStage = VulkanTexture::LayoutToPipelineSrcStageFlags(newLayout);
    VkAccessFlags dstAccess = VulkanTexture::LayoutToSrcAccessMask(newLayout);

    uint32_t currentQueueFamilyIndex = texture->currentQueueFamilyIndex();
    uint32_t newQueueFamilyIndex = skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState);
    auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
        return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
               queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
    };
    if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
        // It is illegal to have both the new and old queue be special queue families (i.e. external
        // or foreign).
        return;
    }

    texture->setImageLayoutAndQueueIndex(this,
                                         newLayout,
                                         dstAccess,
                                         dstStage,
                                         false,
                                         newQueueFamilyIndex);
}

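// Helper for vkQueueSubmit. When the context is protected, a VkProtectedSubmitInfo
// with protectedSubmit=VK_TRUE must be chained onto VkSubmitInfo::pNext so the
// driver executes the batch as a protected submission.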
static bool submit_to_queue(const VulkanSharedContext* sharedContext,
                            VkQueue queue,
                            VkFence fence,
                            uint32_t waitCount,
                            const VkSemaphore* waitSemaphores,
                            const VkPipelineStageFlags* waitStages,
                            uint32_t commandBufferCount,
                            const VkCommandBuffer* commandBuffers,
                            uint32_t signalCount,
                            const VkSemaphore* signalSemaphores,
                            Protected protectedContext) {
    VkProtectedSubmitInfo protectedSubmitInfo;
    if (protectedContext == Protected::kYes) {
        memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
        protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
        protectedSubmitInfo.pNext = nullptr;
        protectedSubmitInfo.protectedSubmit = VK_TRUE;
    }

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = protectedContext == Protected::kYes ? &protectedSubmitInfo : nullptr;
    submitInfo.waitSemaphoreCount = waitCount;
    submitInfo.pWaitSemaphores = waitSemaphores;
    submitInfo.pWaitDstStageMask = waitStages;
    submitInfo.commandBufferCount = commandBufferCount;
    submitInfo.pCommandBuffers = commandBuffers;
    submitInfo.signalSemaphoreCount = signalCount;
    submitInfo.pSignalSemaphores = signalSemaphores;
    VkResult result;
    VULKAN_CALL_RESULT(sharedContext, result, QueueSubmit(queue, 1, &submitInfo, fence));
    return result == VK_SUCCESS;
}

bool VulkanCommandBuffer::submit(VkQueue queue) {
    this->end();

    auto device = fSharedContext->device();
    VkResult err;

    if (fSubmitFence == VK_NULL_HANDLE) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        VULKAN_CALL_RESULT(
                fSharedContext, err, CreateFence(device, &fenceInfo, nullptr, &fSubmitFence));
        if (err) {
            fSubmitFence = VK_NULL_HANDLE;
            return false;
        }
    } else {
        // This cannot return DEVICE_LOST so we assert we succeeded.
        VULKAN_CALL_RESULT(fSharedContext, err, ResetFences(device, 1, &fSubmitFence));
        SkASSERT(err == VK_SUCCESS);
    }

    SkASSERT(fSubmitFence != VK_NULL_HANDLE);
    int waitCount = fWaitSemaphores.size();
    TArray<VkPipelineStageFlags> vkWaitStages(waitCount);
    for (int i = 0; i < waitCount; ++i) {
        // Conservatively wait at all commands (the exact stage mask here is an assumption).
        vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
    }

    bool submitted = submit_to_queue(fSharedContext,
                                     queue,
                                     fSubmitFence,
                                     waitCount,
                                     fWaitSemaphores.data(),
                                     vkWaitStages.data(),
                                     /*commandBufferCount*/ 1,
                                     &fPrimaryCommandBuffer,
                                     fSignalSemaphores.size(),
                                     fSignalSemaphores.data(),
                                     fSharedContext->isProtected());
    fWaitSemaphores.clear();
    fSignalSemaphores.clear();
    if (!submitted) {
        // Destroy the fence or else we will try to wait forever for it to finish.
        VULKAN_CALL(fSharedContext->interface(), DestroyFence(device, fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
        return false;
    }
    return true;
}

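// Poll (without blocking) whether the most recent submission has completed.
// VK_SUCCESS and device loss both count as "finished"; VK_NOT_READY means the
// GPU is still working.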
bool VulkanCommandBuffer::isFinished() {
    SkASSERT(!fActive);
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    VkResult err;
    VULKAN_CALL_RESULT_NOCHECK(fSharedContext->interface(), err,
                               GetFenceStatus(fSharedContext->device(), fSubmitFence));
    switch (err) {
        case VK_SUCCESS:
        case VK_ERROR_DEVICE_LOST:
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SKGPU_LOG_F("Error calling vkGetFenceStatus. Error: %d", err);
            SK_ABORT("Got an invalid fence status");
            return false;
    }
}

void VulkanCommandBuffer::waitUntilFinished() {
    if (fSubmitFence == VK_NULL_HANDLE) {
        return;
    }
    VULKAN_CALL_ERRCHECK(fSharedContext,
                         WaitForFences(fSharedContext->device(),
                                       1,
                                       &fSubmitFence,
                                       /*waitAll=*/true,
                                       /*timeout=*/UINT64_MAX));
}

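// rtAdjust maps framebuffer-space positions to Vulkan NDC:
//   ndc.x = pos.x * (2/w) + (-1 - 2*vx/w),  ndc.y = pos.y * (2/h) + (-1 - 2*vy/h)
// i.e. a scale in .xy and a translate in .zw, where (vx, vy) is the viewport
// origin adjusted by the replay translation. At pos.x = vx this yields -1 and at
// pos.x = vx + w it yields +1. Shaders apply it as pos.xy * rtAdjust.xy + rtAdjust.zw.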
void VulkanCommandBuffer::updateRtAdjustUniform(const SkRect& viewport) {
    SkASSERT(fActive && !fActiveRenderPass);

    // Vulkan's framebuffer space has (0, 0) at the top left. This agrees with Skia's device coords.
    // However, in NDC (-1, -1) is the bottom left. So we flip the origin here (assuming all
    // surfaces we have are TopLeft origin). We then store the adjustment values as a uniform.
    const float x = viewport.x() - fReplayTranslation.x();
    const float y = viewport.y() - fReplayTranslation.y();
    float invTwoW = 2.f / viewport.width();
    float invTwoH = 2.f / viewport.height();
    const float rtAdjust[4] = {invTwoW, invTwoH, -1.f - x * invTwoW, -1.f - y * invTwoH};

    sk_sp<Buffer> intrinsicUniformBuffer = fResourceProvider->refIntrinsicConstantBuffer();
    const VulkanBuffer* intrinsicVulkanBuffer =
            static_cast<VulkanBuffer*>(intrinsicUniformBuffer.get());
    SkASSERT(intrinsicVulkanBuffer);

    fUniformBuffersToBind[VulkanGraphicsPipeline::kIntrinsicUniformBufferIndex] =
            {intrinsicUniformBuffer.get(), /*offset=*/0};

    this->updateBuffer(intrinsicVulkanBuffer,
                       &rtAdjust,
                       sizeof(rtAdjust));

    // Ensure the buffer update is completed and made visible before reading
    intrinsicVulkanBuffer->setBufferAccess(this, VK_ACCESS_UNIFORM_READ_BIT,
                                           VK_PIPELINE_STAGE_VERTEX_SHADER_BIT);
    this->trackResource(std::move(intrinsicUniformBuffer));
}

bool VulkanCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc,
                                          const Texture* colorTexture,
                                          const Texture* resolveTexture,
                                          const Texture* depthStencilTexture,
                                          SkRect viewport,
                                          const DrawPassList& drawPasses) {
    for (const auto& drawPass : drawPasses) {
        // Our current implementation of setting texture image layouts does not allow layout changes
        // once we have already begun a render pass, so prior to any other commands, set the layout
        // of all sampled textures from the drawpass so they can be sampled from the shader.
        const skia_private::TArray<sk_sp<TextureProxy>>& sampledTextureProxies =
                drawPass->sampledTextures();
        for (const sk_sp<TextureProxy>& textureProxy : sampledTextureProxies) {
            VulkanTexture* vulkanTexture = const_cast<VulkanTexture*>(
                    static_cast<const VulkanTexture*>(
                            textureProxy->texture()));
            vulkanTexture->setImageLayout(this,
                                          VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                          VK_ACCESS_SHADER_READ_BIT,
                                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                          false);
            this->submitPipelineBarriers();
        }
    }

    this->updateRtAdjustUniform(viewport);
    this->setViewport(viewport);

    if (!this->beginRenderPass(renderPassDesc, colorTexture, resolveTexture, depthStencilTexture)) {
        return false;
    }

    for (const auto& drawPass : drawPasses) {
        this->addDrawPass(drawPass.get());
    }

    this->endRenderPass();
    return true;
}

bool VulkanCommandBuffer::updateLoadMSAAVertexBuffer() {
    const Buffer* vertexBuffer = fResourceProvider->loadMSAAVertexBuffer();
    if (!vertexBuffer) {
        return false;
    }
    const VulkanBuffer* vulkanVertexBuffer = static_cast<const VulkanBuffer*>(vertexBuffer);
    SkASSERT(vulkanVertexBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);

    // Determine vertices in NDC. TODO: When only wanting to draw a portion of the resolve
    // texture, these values will need to be dynamically determined. For now, simply span the
    // range of NDC since we want to reference the entire resolve texture.
    static constexpr float kVertices[8] = { 1.f, 1.f,
                                            1.f, -1.f,
                                            -1.f, 1.f,
                                            -1.f, -1.f };
    this->updateBuffer(vulkanVertexBuffer,
                       &kVertices,
                       sizeof(kVertices));

    // Ensure the buffer update is completed and made visible before reading
    vulkanVertexBuffer->setBufferAccess(this, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                                        VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);

    return true;
}

bool VulkanCommandBuffer::updateAndBindLoadMSAAInputAttachment(const VulkanTexture& resolveTexture)
{
    // Fetch a descriptor set that contains one input attachment
    STArray<1, DescriptorData> inputDescriptors =
            {VulkanGraphicsPipeline::kInputAttachmentDescriptor};
    sk_sp<VulkanDescriptorSet> set = fResourceProvider->findOrCreateDescriptorSet(
            SkSpan<DescriptorData>{&inputDescriptors.front(), inputDescriptors.size()});
    if (!set) {
        return false;
    }

    VkDescriptorImageInfo textureInfo;
    memset(&textureInfo, 0, sizeof(VkDescriptorImageInfo));
    textureInfo.sampler = VK_NULL_HANDLE;
    textureInfo.imageView =
            resolveTexture.getImageView(VulkanImageView::Usage::kAttachment)->imageView();
    textureInfo.imageLayout = resolveTexture.currentLayout();

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = *set->descriptorSet();
    writeInfo.dstBinding = 0;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    writeInfo.pImageInfo = &textureInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    VULKAN_CALL(fSharedContext->interface(),
                UpdateDescriptorSets(fSharedContext->device(),
                                     /*descriptorWriteCount=*/1,
                                     &writeInfo,
                                     /*descriptorCopyCount=*/0,
                                     /*pDescriptorCopies=*/nullptr));

    VULKAN_CALL(fSharedContext->interface(),
                CmdBindDescriptorSets(fPrimaryCommandBuffer,
                                      VK_PIPELINE_BIND_POINT_GRAPHICS,
                                      fActiveGraphicsPipeline->layout(),
                                      VulkanGraphicsPipeline::kInputAttachmentDescSetIndex,
                                      /*setCount=*/1,
                                      set->descriptorSet(),
                                      /*dynamicOffsetCount=*/0,
                                      /*dynamicOffsets=*/nullptr));

    this->trackResource(std::move(set));
    return true;
}

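// Loading MSAA from resolve happens in a dedicated first subpass: a specialized
// pipeline draws a full-screen triangle strip that reads the resolve texture as
// an input attachment into the MSAA color attachment, then nextSubpass() moves to
// the main subpass where normal draws proceed.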
bool VulkanCommandBuffer::loadMSAAFromResolve(const RenderPassDesc& renderPassDesc,
                                              VulkanTexture& resolveTexture,
                                              SkISize dstDimensions) {
    sk_sp<VulkanGraphicsPipeline> loadPipeline =
            fResourceProvider->findOrCreateLoadMSAAPipeline(renderPassDesc);
    if (!loadPipeline) {
        SKGPU_LOG_E("Unable to create pipeline to load resolve texture into MSAA attachment");
        return false;
    }

    this->bindGraphicsPipeline(loadPipeline.get());
    // Make sure we do not attempt to bind uniform or texture/sampler descriptors because we do
    // not use them for loading MSAA from resolve.
    fBindUniformBuffers = false;
    fBindTextureSamplers = false;

    this->setScissor(/*left=*/0, /*top=*/0, dstDimensions.width(), dstDimensions.height());

    if (!this->updateAndBindLoadMSAAInputAttachment(resolveTexture)) {
        SKGPU_LOG_E("Unable to update and bind an input attachment descriptor for loading MSAA "
                    "from resolve");
        return false;
    }

    SkASSERT(fResourceProvider->loadMSAAVertexBuffer());
    this->bindVertexBuffers(fResourceProvider->loadMSAAVertexBuffer(),
                            /*vertexOffset=*/0,
                            /*instanceBuffer=*/nullptr,
                            /*instanceOffset=*/0);

    this->draw(PrimitiveType::kTriangleStrip, /*baseVertex=*/0, /*vertexCount=*/4);
    this->nextSubpass();

    // If we loaded the resolve attachment, then we would have set the image layout to be
    // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an
    // input attachment. However, when we switched to the main subpass it will transition the
    // layout internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our
    // tracking of the layout to match the new layout.
    resolveTexture.updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    // After using a distinct descriptor set layout for loading MSAA from resolve, we will need to
    // (re-)bind any descriptor sets.
    fBindUniformBuffers = true;
    fBindTextureSamplers = true;
    return true;
}

namespace {
void setup_texture_layouts(VulkanCommandBuffer* cmdBuf,
                           VulkanTexture* colorTexture,
                           VulkanTexture* resolveTexture,
                           VulkanTexture* depthStencilTexture,
                           bool loadMSAAFromResolve) {
    if (colorTexture) {
        colorTexture->setImageLayout(cmdBuf,
                                     VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                     VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                             VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                     VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                     /*byRegion=*/false);
        if (resolveTexture) {
            if (loadMSAAFromResolve) {
                // When loading MSAA from resolve, the texture is used in the first subpass as an
                // input attachment. Subsequent subpass(es) need the resolve texture to provide read
                // access to the color attachment (for use cases such as blending), so add access
                // and pipeline stage flags for both usages.
                resolveTexture->setImageLayout(cmdBuf,
                                               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                               VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
                                                       VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
                                               VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                                       VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                               /*byRegion=*/false);
            } else {
                resolveTexture->setImageLayout(cmdBuf,
                                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                               VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                                       VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                               VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                               /*byRegion=*/false);
            }
        }
    }
    if (depthStencilTexture) {
        depthStencilTexture->setImageLayout(cmdBuf,
                                            VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
                                                    VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
                                                    VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
                                            /*byRegion=*/false);
    }
}

void track_attachments(VulkanCommandBuffer* cmdBuf,
                       VulkanTexture* colorTexture,
                       VulkanTexture* resolveTexture,
                       VulkanTexture* depthStencilTexture) {
    if (colorTexture) {
        cmdBuf->trackResource(sk_ref_sp(colorTexture));
    }
    if (resolveTexture) {
        cmdBuf->trackResource(sk_ref_sp(resolveTexture));
    }
    if (depthStencilTexture) {
        cmdBuf->trackResource(sk_ref_sp(depthStencilTexture));
    }
}

void gather_attachment_views(skia_private::TArray<VkImageView>& attachmentViews,
                             VulkanTexture* colorTexture,
                             VulkanTexture* resolveTexture,
                             VulkanTexture* depthStencilTexture) {
    if (colorTexture) {
        VkImageView& colorAttachmentView = attachmentViews.push_back();
        colorAttachmentView =
                colorTexture->getImageView(VulkanImageView::Usage::kAttachment)->imageView();

        if (resolveTexture) {
            VkImageView& resolveView = attachmentViews.push_back();
            resolveView =
                    resolveTexture->getImageView(VulkanImageView::Usage::kAttachment)->imageView();
        }
    }

    if (depthStencilTexture) {
        VkImageView& stencilView = attachmentViews.push_back();
        stencilView =
                depthStencilTexture->getImageView(VulkanImageView::Usage::kAttachment)->imageView();
    }
}

void gather_clear_values(
        skia_private::TArray<VkClearValue>& clearValues,
        const RenderPassDesc& renderPassDesc,
        VulkanTexture* colorTexture,
        VulkanTexture* depthStencilTexture,
        int depthStencilAttachmentIdx) {
    clearValues.push_back_n(depthStencilAttachmentIdx + 1);
    if (colorTexture) {
        VkClearValue& colorAttachmentClear =
                clearValues.at(0);
        memset(&colorAttachmentClear, 0, sizeof(VkClearValue));
        colorAttachmentClear.color = {{renderPassDesc.fClearColor[0],
                                       renderPassDesc.fClearColor[1],
                                       renderPassDesc.fClearColor[2],
                                       renderPassDesc.fClearColor[3]}};
    }
    // Resolve texture does not have a clear value
    if (depthStencilTexture) {
        VkClearValue& depthStencilAttachmentClear = clearValues.at(depthStencilAttachmentIdx);
        memset(&depthStencilAttachmentClear, 0, sizeof(VkClearValue));
        depthStencilAttachmentClear.depthStencil = {renderPassDesc.fClearDepth,
                                                    renderPassDesc.fClearStencil};
    }
}

} // anonymous namespace

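// beginRenderPass pulls the pieces together: transition attachment layouts, find
// or create a compatible VkRenderPass and VkFramebuffer, record
// vkCmdBeginRenderPass with the gathered clear values, and, when requested, run
// the load-MSAA-from-resolve subpass before the main one.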
bool VulkanCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc,
                                          const Texture* colorTexture,
                                          const Texture* resolveTexture,
                                          const Texture* depthStencilTexture) {
    // TODO: Check that Textures match RenderPassDesc
    VulkanTexture* vulkanColorTexture =
            const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(colorTexture));
    VulkanTexture* vulkanResolveTexture =
            const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(resolveTexture));
    VulkanTexture* vulkanDepthStencilTexture =
            const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(depthStencilTexture));

    SkASSERT(resolveTexture ? renderPassDesc.fColorResolveAttachment.fStoreOp == StoreOp::kStore
                            : true);

    // Determine if we need to load MSAA from resolve, and if so, make certain that key conditions
    // are met before proceeding.
    bool loadMSAAFromResolve = renderPassDesc.fColorResolveAttachment.fTextureInfo.isValid() &&
                               renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;
    if (loadMSAAFromResolve && (!vulkanResolveTexture || !vulkanColorTexture ||
                                !vulkanResolveTexture->supportsInputAttachmentUsage())) {
        SKGPU_LOG_E("Cannot begin render pass. In order to load MSAA from resolve, the color "
                    "attachment must have input attachment usage and both the color and resolve "
                    "attachments must be valid.");
        return false;
    }

    track_attachments(this, vulkanColorTexture, vulkanResolveTexture, vulkanDepthStencilTexture);

    // Before beginning a renderpass, set all textures to the appropriate image layout.
    setup_texture_layouts(this,
                          vulkanColorTexture,
                          vulkanResolveTexture,
                          vulkanDepthStencilTexture,
                          loadMSAAFromResolve);

    static constexpr int kMaxNumAttachments = 3;
    // Gather attachment views needed for frame buffer creation.
    skia_private::TArray<VkImageView> attachmentViews;
    gather_attachment_views(
            attachmentViews, vulkanColorTexture, vulkanResolveTexture, vulkanDepthStencilTexture);

    // Gather clear values needed for RenderPassBeginInfo. Indexed by attachment number.
    skia_private::STArray<kMaxNumAttachments, VkClearValue> clearValues;
    // The depth/stencil attachment can be at attachment index 1 or 2 depending on whether there is
    // a resolve texture attachment for this renderpass.
    int depthStencilAttachmentIndex = resolveTexture ? 2 : 1;
    gather_clear_values(clearValues,
                        renderPassDesc,
                        vulkanColorTexture,
                        vulkanDepthStencilTexture,
                        depthStencilAttachmentIndex);

    sk_sp<VulkanRenderPass> vulkanRenderPass =
            fResourceProvider->findOrCreateRenderPass(renderPassDesc, /*compatibleOnly=*/false);
    if (!vulkanRenderPass) {
        SKGPU_LOG_W("Could not create Vulkan RenderPass");
        return false;
    }
    this->submitPipelineBarriers();
    this->trackResource(vulkanRenderPass);

    int frameBufferWidth = 0;
    int frameBufferHeight = 0;
    // TODO: Get frame buffer render area from RenderPassDesc. Account for granularity if it wasn't
    // already. For now, simply set the render area to be the entire frame buffer.
    if (colorTexture) {
        frameBufferWidth = colorTexture->dimensions().width();
        frameBufferHeight = colorTexture->dimensions().height();
    } else if (depthStencilTexture) {
        frameBufferWidth = depthStencilTexture->dimensions().width();
        frameBufferHeight = depthStencilTexture->dimensions().height();
    }
    sk_sp<VulkanFramebuffer> framebuffer = fResourceProvider->createFramebuffer(fSharedContext,
                                                                                attachmentViews,
                                                                                *vulkanRenderPass,
                                                                                frameBufferWidth,
                                                                                frameBufferHeight);
    if (!framebuffer) {
        SKGPU_LOG_W("Could not create Vulkan Framebuffer");
        return false;
    }

    VkRenderPassBeginInfo beginInfo;
    memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.pNext = nullptr;
    beginInfo.renderPass = vulkanRenderPass->renderPass();
    beginInfo.framebuffer = framebuffer->framebuffer();
    beginInfo.renderArea = {{ 0, 0 },
                            { (unsigned int) frameBufferWidth, (unsigned int) frameBufferHeight }};
    beginInfo.clearValueCount = clearValues.size();
    beginInfo.pClearValues = clearValues.begin();

    // If loading MSAA from resolve, we need to update and bind a vertex buffer with NDC positions.
    // This takes care of some necessary preparations that must be performed while there is not an
    // active renderpass.
    if (loadMSAAFromResolve) {
        // We manually load the contents of the resolve texture into the MSAA attachment as a draw,
        // so the MSAA attachment's load op should be LoadOp::kDiscard.
        SkASSERT(renderPassDesc.fColorAttachment.fLoadOp == LoadOp::kDiscard);
        SkASSERT(!fActiveRenderPass);
        SkASSERT(resolveTexture);

        if (!this->updateLoadMSAAVertexBuffer()) {
            SKGPU_LOG_E("Failed to update vertex buffer for loading MSAA from resolve");
            return false;
        }
    }

    // Submit pipeline barriers to ensure any image layout transitions are recorded prior to
    // beginning the render pass.
    this->submitPipelineBarriers();
    // TODO: If we add support for secondary command buffers, dynamically determine subpass contents
    VULKAN_CALL(fSharedContext->interface(),
                CmdBeginRenderPass(fPrimaryCommandBuffer,
                                   &beginInfo,
                                   VK_SUBPASS_CONTENTS_INLINE));
    fActiveRenderPass = true;

    if (loadMSAAFromResolve && !this->loadMSAAFromResolve(renderPassDesc,
                                                          *vulkanResolveTexture,
                                                          vulkanColorTexture->dimensions())) {
        SKGPU_LOG_E("Failed to load MSAA from resolve");
        this->endRenderPass();
        return false;
    }

    // Once we have an active render pass, the command buffer should hold on to a frame buffer ref.
    this->trackResource(std::move(framebuffer));
    return true;
}

void VulkanCommandBuffer::endRenderPass() {
    SkASSERT(fActive);
    VULKAN_CALL(fSharedContext->interface(), CmdEndRenderPass(fPrimaryCommandBuffer));
    fActiveRenderPass = false;
}

void VulkanCommandBuffer::addDrawPass(const DrawPass* drawPass) {
    drawPass->addResourceRefs(this);
    for (auto [type, cmdPtr] : drawPass->commands()) {
        switch (type) {
            case DrawPassCommands::Type::kBindGraphicsPipeline: {
                auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr);
                this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex));
                break;
            }
            case DrawPassCommands::Type::kSetBlendConstants: {
                auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr);
                this->setBlendConstants(sbc->fBlendConstants);
                break;
            }
            case DrawPassCommands::Type::kBindUniformBuffer: {
                auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr);
                this->recordBufferBindingInfo(bub->fInfo, bub->fSlot);
                break;
            }
            case DrawPassCommands::Type::kBindDrawBuffers: {
                auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr);
                this->bindDrawBuffers(
                        bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect);
                break;
            }
            case DrawPassCommands::Type::kBindTexturesAndSamplers: {
                auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr);
                this->recordTextureAndSamplerDescSet(*drawPass, *bts);
                break;
            }
            case DrawPassCommands::Type::kSetScissor: {
                auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr);
                const SkIRect& rect = ss->fScissor;
                this->setScissor(rect.fLeft, rect.fTop, rect.width(), rect.height());
                break;
            }
            case DrawPassCommands::Type::kDraw: {
                auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr);
                this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexed: {
                auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr);
                this->drawIndexed(
                        draw->fType, draw->fBaseIndex, draw->fIndexCount, draw->fBaseVertex);
                break;
            }
            case DrawPassCommands::Type::kDrawInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr);
                this->drawInstanced(draw->fType,
                                    draw->fBaseVertex,
                                    draw->fVertexCount,
                                    draw->fBaseInstance,
                                    draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr);
                this->drawIndexedInstanced(draw->fType,
                                           draw->fBaseIndex,
                                           draw->fIndexCount,
                                           draw->fBaseVertex,
                                           draw->fBaseInstance,
                                           draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr);
                this->drawIndirect(draw->fType);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr);
                this->drawIndexedIndirect(draw->fType);
                break;
            }
        }
    }
}

void VulkanCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) {
    fActiveGraphicsPipeline = static_cast<const VulkanGraphicsPipeline*>(graphicsPipeline);
    SkASSERT(fActiveRenderPass);
    VULKAN_CALL(fSharedContext->interface(), CmdBindPipeline(fPrimaryCommandBuffer,
                                                             VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                             fActiveGraphicsPipeline->pipeline()));
    // TODO(b/293924877): Compare pipeline layouts. If 2 pipelines have the same pipeline layout,
    // then descriptor sets do not need to be re-bound. For now, simply force a re-binding of
    // descriptor sets with any new bindGraphicsPipeline DrawPassCommand.
    fBindUniformBuffers = true;
}

void VulkanCommandBuffer::setBlendConstants(float* blendConstants) {
    SkASSERT(fActive);
    if (0 != memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
        VULKAN_CALL(fSharedContext->interface(),
                    CmdSetBlendConstants(fPrimaryCommandBuffer, blendConstants));
        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
    }
}

void VulkanCommandBuffer::recordBufferBindingInfo(const BindBufferInfo& info, UniformSlot slot) {
    unsigned int bufferIndex = 0;
    switch (slot) {
        case UniformSlot::kRenderStep:
            bufferIndex = VulkanGraphicsPipeline::kRenderStepUniformBufferIndex;
            break;
        case UniformSlot::kPaint:
            bufferIndex = VulkanGraphicsPipeline::kPaintUniformBufferIndex;
            break;
        default:
            SkASSERT(false);
    }

    fUniformBuffersToBind[bufferIndex] = info;
    fBindUniformBuffers = true;
}

void VulkanCommandBuffer::syncDescriptorSets() {
    if (fBindUniformBuffers) {
        this->bindUniformBuffers();
        // Changes to descriptor sets in lower slot numbers disrupt later set bindings. Currently,
        // the descriptor set which houses uniform buffers is at a lower slot than the texture /
        // sampler set, so rebinding uniform buffers necessitates re-binding any texture/samplers.
        fBindTextureSamplers = true;
    }
    if (fBindTextureSamplers) {
        this->bindTextureSamplers();
    }
}

void VulkanCommandBuffer::bindUniformBuffers() {
    fBindUniformBuffers = false;

    // We always bind at least one uniform buffer descriptor for intrinsic uniforms, but can bind
    // up to three (adding one for render step uniforms and one for paint uniforms).
    STArray<3, DescriptorData> descriptors;
    descriptors.push_back({DescriptorType::kUniformBuffer,
                           /*count=*/1,
                           VulkanGraphicsPipeline::kIntrinsicUniformBufferIndex,
                           PipelineStageFlags::kVertexShader | PipelineStageFlags::kFragmentShader});
    if (fActiveGraphicsPipeline->hasStepUniforms() &&
        fUniformBuffersToBind[VulkanGraphicsPipeline::kRenderStepUniformBufferIndex].fBuffer) {
        descriptors.push_back({DescriptorType::kUniformBuffer,
                               /*count=*/1,
                               VulkanGraphicsPipeline::kRenderStepUniformBufferIndex,
                               PipelineStageFlags::kVertexShader | PipelineStageFlags::kFragmentShader});
    }
    if (fActiveGraphicsPipeline->hasFragmentUniforms() &&
        fUniformBuffersToBind[VulkanGraphicsPipeline::kPaintUniformBufferIndex].fBuffer) {
        descriptors.push_back({DescriptorType::kUniformBuffer,
                               /*count=*/1,
                               VulkanGraphicsPipeline::kPaintUniformBufferIndex,
                               PipelineStageFlags::kFragmentShader});
    }
    sk_sp<VulkanDescriptorSet> set = fResourceProvider->findOrCreateDescriptorSet(
            SkSpan<DescriptorData>{&descriptors.front(), descriptors.size()});

    if (!set) {
        SKGPU_LOG_E("Unable to find or create descriptor set");
        return;
    }
    static uint64_t maxUniformBufferRange = static_cast<const VulkanSharedContext*>(
            fSharedContext)->vulkanCaps().maxUniformBufferRange();

    for (int i = 0; i < descriptors.size(); i++) {
        int descriptorBindingIndex = descriptors.at(i).bindingIndex;
        SkASSERT(static_cast<unsigned long>(descriptorBindingIndex)
                 < fUniformBuffersToBind.size());
        if (fUniformBuffersToBind[descriptorBindingIndex].fBuffer) {
            VkDescriptorBufferInfo bufferInfo;
            memset(&bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
            auto vulkanBuffer = static_cast<const VulkanBuffer*>(
                    fUniformBuffersToBind[descriptorBindingIndex].fBuffer);
            bufferInfo.buffer = vulkanBuffer->vkBuffer();
            bufferInfo.offset = fUniformBuffersToBind[descriptorBindingIndex].fOffset;
            bufferInfo.range = clamp_ubo_binding_size(bufferInfo.offset, vulkanBuffer->size(),
                                                      maxUniformBufferRange);

            VkWriteDescriptorSet writeInfo;
            memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
            writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
            writeInfo.pNext = nullptr;
            writeInfo.dstSet = *set->descriptorSet();
            writeInfo.dstBinding = descriptorBindingIndex;
            writeInfo.dstArrayElement = 0;
            writeInfo.descriptorCount = descriptors.at(i).count;
            writeInfo.descriptorType = DsTypeEnumToVkDs(descriptors.at(i).type);
            writeInfo.pImageInfo = nullptr;
            writeInfo.pBufferInfo = &bufferInfo;
            writeInfo.pTexelBufferView = nullptr;

            // TODO(b/293925059): Migrate to updating all the uniform descriptors with one driver
            // call. Calling UpdateDescriptorSets once to encapsulate updates to all uniform
            // descriptors would be ideal, but that led to issues with draws where all the UBOs
            // within that set would unexpectedly be assigned the same offset. Updating them one at
            // a time within this loop works in the meantime but is suboptimal.
            VULKAN_CALL(fSharedContext->interface(),
                        UpdateDescriptorSets(fSharedContext->device(),
                                             /*descriptorWriteCount=*/1,
                                             &writeInfo,
                                             /*descriptorCopyCount=*/0,
                                             /*pDescriptorCopies=*/nullptr));
        }
    }
    VULKAN_CALL(fSharedContext->interface(),
                CmdBindDescriptorSets(fPrimaryCommandBuffer,
                                      VK_PIPELINE_BIND_POINT_GRAPHICS,
                                      fActiveGraphicsPipeline->layout(),
                                      VulkanGraphicsPipeline::kUniformBufferDescSetIndex,
                                      /*setCount=*/1,
                                      set->descriptorSet(),
                                      /*dynamicOffsetCount=*/0,
                                      /*dynamicOffsets=*/nullptr));
    this->trackResource(std::move(set));
}

void VulkanCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices,
                                          const BindBufferInfo& instances,
                                          const BindBufferInfo& indices,
                                          const BindBufferInfo& indirect) {
    this->bindVertexBuffers(vertices.fBuffer,
                            vertices.fOffset,
                            instances.fBuffer,
                            instances.fOffset);
    this->bindIndexBuffer(indices.fBuffer, indices.fOffset);
    this->bindIndirectBuffer(indirect.fBuffer, indirect.fOffset);
}

void VulkanCommandBuffer::bindVertexBuffers(const Buffer* vertexBuffer,
                                            size_t vertexOffset,
                                            const Buffer* instanceBuffer,
                                            size_t instanceOffset) {
    this->bindInputBuffer(vertexBuffer, vertexOffset,
                          VulkanGraphicsPipeline::kVertexBufferIndex);
    this->bindInputBuffer(instanceBuffer, instanceOffset,
                          VulkanGraphicsPipeline::kInstanceBufferIndex);
}

void VulkanCommandBuffer::bindInputBuffer(const Buffer* buffer, VkDeviceSize offset,
                                          uint32_t binding) {
    if (buffer) {
        VkBuffer vkBuffer = static_cast<const VulkanBuffer*>(buffer)->vkBuffer();
        SkASSERT(vkBuffer != VK_NULL_HANDLE);
        if (vkBuffer != fBoundInputBuffers[binding] ||
            offset != fBoundInputBufferOffsets[binding]) {
            VULKAN_CALL(fSharedContext->interface(),
                        CmdBindVertexBuffers(fPrimaryCommandBuffer,
                                             binding,
                                             /*bindingCount=*/1,
                                             &vkBuffer,
                                             &offset));
            fBoundInputBuffers[binding] = vkBuffer;
            fBoundInputBufferOffsets[binding] = offset;
            this->trackResource(sk_ref_sp(buffer));
        }
    }
}

void VulkanCommandBuffer::bindIndexBuffer(const Buffer* indexBuffer, size_t offset) {
    if (indexBuffer) {
        VkBuffer vkBuffer = static_cast<const VulkanBuffer*>(indexBuffer)->vkBuffer();
        SkASSERT(vkBuffer != VK_NULL_HANDLE);
        if (vkBuffer != fBoundIndexBuffer || offset != fBoundIndexBufferOffset) {
            VULKAN_CALL(fSharedContext->interface(), CmdBindIndexBuffer(fPrimaryCommandBuffer,
                                                                        vkBuffer,
                                                                        offset,
                                                                        VK_INDEX_TYPE_UINT16));
            fBoundIndexBuffer = vkBuffer;
            fBoundIndexBufferOffset = offset;
            this->trackResource(sk_ref_sp(indexBuffer));
        }
    } else {
        fBoundIndexBuffer = VK_NULL_HANDLE;
        fBoundIndexBufferOffset = 0;
    }
}

void VulkanCommandBuffer::bindIndirectBuffer(const Buffer* indirectBuffer, size_t offset) {
    // Indirect buffers are not bound via the command buffer, but specified in the draw cmd.
    if (indirectBuffer) {
        fBoundIndirectBuffer = static_cast<const VulkanBuffer*>(indirectBuffer)->vkBuffer();
        fBoundIndirectBufferOffset = offset;
        this->trackResource(sk_ref_sp(indirectBuffer));
    } else {
        fBoundIndirectBuffer = VK_NULL_HANDLE;
        fBoundIndirectBufferOffset = 0;
    }
}

void VulkanCommandBuffer::recordTextureAndSamplerDescSet(
        const DrawPass& drawPass, const DrawPassCommands::BindTexturesAndSamplers& command) {
    if (command.fNumTexSamplers == 0) {
        fNumTextureSamplers = 0;
        fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
        fBindTextureSamplers = false;
        return;
    }
    // Query resource provider to obtain a descriptor set for the texture/samplers
    TArray<DescriptorData> descriptors(command.fNumTexSamplers);
    for (int i = 0; i < command.fNumTexSamplers; i++) {
        descriptors.push_back({DescriptorType::kCombinedTextureSampler,
                               /*descCount=*/1,
                               /*bindingIdx=*/i,
                               PipelineStageFlags::kFragmentShader});
    }
    sk_sp<VulkanDescriptorSet> set = fResourceProvider->findOrCreateDescriptorSet(
            SkSpan<DescriptorData>{&descriptors.front(), descriptors.size()});

    if (!set) {
        SKGPU_LOG_E("Unable to find or create descriptor set");
        fNumTextureSamplers = 0;
        fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
        fBindTextureSamplers = false;
        return;
    }
    // Populate the descriptor set with texture/sampler descriptors
    TArray<VkWriteDescriptorSet> writeDescriptorSets(command.fNumTexSamplers);
    TArray<VkDescriptorImageInfo> descriptorImageInfos(command.fNumTexSamplers);
    for (int i = 0; i < command.fNumTexSamplers; ++i) {
        auto texture = const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(
                drawPass.getTexture(command.fTextureIndices[i])));
        auto sampler = static_cast<const VulkanSampler*>(
                drawPass.getSampler(command.fSamplerIndices[i]));
        if (!texture || !sampler) {
            // TODO(b/294198324): Investigate the root cause for null texture or samplers on
            // Ubuntu QuadP400 GPU
            SKGPU_LOG_E("Texture and sampler must not be null");
            fNumTextureSamplers = 0;
            fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
            fBindTextureSamplers = false;
            return;
        }

        VkDescriptorImageInfo& textureInfo = descriptorImageInfos.push_back();
        memset(&textureInfo, 0, sizeof(VkDescriptorImageInfo));
        textureInfo.sampler = sampler->vkSampler();
        textureInfo.imageView =
                texture->getImageView(VulkanImageView::Usage::kShaderInput)->imageView();
        textureInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        VkWriteDescriptorSet& writeInfo = writeDescriptorSets.push_back();
        memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
        writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        writeInfo.pNext = nullptr;
        writeInfo.dstSet = *set->descriptorSet();
        writeInfo.dstBinding = i;
        writeInfo.dstArrayElement = 0;
        writeInfo.descriptorCount = 1;
        writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        writeInfo.pImageInfo = &textureInfo;
        writeInfo.pBufferInfo = nullptr;
        writeInfo.pTexelBufferView = nullptr;
    }

    VULKAN_CALL(fSharedContext->interface(),
                UpdateDescriptorSets(fSharedContext->device(),
                                     command.fNumTexSamplers,
                                     &writeDescriptorSets[0],
                                     /*descriptorCopyCount=*/0,
                                     /*pDescriptorCopies=*/nullptr));

    // Store the updated descriptor set to be actually bound later on. This avoids binding and
    // potentially having to re-bind in cases where earlier descriptor sets change while going
    // through drawpass commands.
    fTextureSamplerDescSetToBind = *set->descriptorSet();
    fBindTextureSamplers = true;
    fNumTextureSamplers = command.fNumTexSamplers;
    this->trackResource(std::move(set));
}

void VulkanCommandBuffer::bindTextureSamplers() {
    fBindTextureSamplers = false;
    if (fTextureSamplerDescSetToBind != VK_NULL_HANDLE &&
        fActiveGraphicsPipeline->numTextureSamplers() == fNumTextureSamplers) {
        VULKAN_CALL(fSharedContext->interface(),
                    CmdBindDescriptorSets(fPrimaryCommandBuffer,
                                          VK_PIPELINE_BIND_POINT_GRAPHICS,
                                          fActiveGraphicsPipeline->layout(),
                                          VulkanGraphicsPipeline::kTextureBindDescSetIndex,
                                          /*setCount=*/1,
                                          &fTextureSamplerDescSetToBind,
                                          /*dynamicOffsetCount=*/0,
                                          /*dynamicOffsets=*/nullptr));
    }
}

void VulkanCommandBuffer::setScissor(unsigned int left, unsigned int top, unsigned int width,
                                     unsigned int height) {
    VkRect2D scissor = {
        {(int32_t)left, (int32_t)top},
        {width, height}
    };
    VULKAN_CALL(fSharedContext->interface(),
                CmdSetScissor(fPrimaryCommandBuffer,
                              /*firstScissor=*/0,
                              /*scissorCount=*/1,
                              &scissor));
}

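// Each draw entry point below first calls syncDescriptorSets(), which lazily
// (re-)binds the uniform-buffer and texture/sampler descriptor sets only when a
// prior command invalidated them. The PrimitiveType parameter is currently
// unused: the topology is baked into the bound pipeline (see the dynamic-state
// TODOs).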
void VulkanCommandBuffer::draw(PrimitiveType,
                               unsigned int baseVertex,
                               unsigned int vertexCount) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    VULKAN_CALL(fSharedContext->interface(),
                CmdDraw(fPrimaryCommandBuffer,
                        vertexCount,
                        /*instanceCount=*/1,
                        baseVertex,
                        /*firstInstance=*/0));
}

void VulkanCommandBuffer::drawIndexed(PrimitiveType,
                                      unsigned int baseIndex,
                                      unsigned int indexCount,
                                      unsigned int baseVertex) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    VULKAN_CALL(fSharedContext->interface(),
                CmdDrawIndexed(fPrimaryCommandBuffer,
                               indexCount,
                               /*instanceCount=*/1,
                               baseIndex,
                               baseVertex,
                               /*firstInstance=*/0));
}

void VulkanCommandBuffer::drawInstanced(PrimitiveType,
                                        unsigned int baseVertex,
                                        unsigned int vertexCount,
                                        unsigned int baseInstance,
                                        unsigned int instanceCount) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    VULKAN_CALL(fSharedContext->interface(),
                CmdDraw(fPrimaryCommandBuffer,
                        vertexCount,
                        instanceCount,
                        baseVertex,
                        baseInstance));
}

void VulkanCommandBuffer::drawIndexedInstanced(PrimitiveType,
                                               unsigned int baseIndex,
                                               unsigned int indexCount,
                                               unsigned int baseVertex,
                                               unsigned int baseInstance,
                                               unsigned int instanceCount) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    VULKAN_CALL(fSharedContext->interface(),
                CmdDrawIndexed(fPrimaryCommandBuffer,
                               indexCount,
                               instanceCount,
                               baseIndex,
                               baseVertex,
                               baseInstance));
}

void VulkanCommandBuffer::drawIndirect(PrimitiveType) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    // Currently we can only support doing one indirect draw operation at a time,
    // so stride is irrelevant.
    VULKAN_CALL(fSharedContext->interface(),
                CmdDrawIndirect(fPrimaryCommandBuffer,
                                fBoundIndirectBuffer,
                                fBoundIndirectBufferOffset,
                                /*drawCount=*/1,
                                /*stride=*/0));
}

void VulkanCommandBuffer::drawIndexedIndirect(PrimitiveType) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    // Currently we can only support doing one indirect draw operation at a time,
    // so stride is irrelevant.
    VULKAN_CALL(fSharedContext->interface(),
                CmdDrawIndexedIndirect(fPrimaryCommandBuffer,
                                       fBoundIndirectBuffer,
                                       fBoundIndirectBufferOffset,
                                       /*drawCount=*/1,
                                       /*stride=*/0));
}

bool VulkanCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer,
                                               size_t srcOffset,
                                               const Buffer* dstBuffer,
                                               size_t dstOffset,
                                               size_t size) {
    auto vkSrcBuffer = static_cast<const VulkanBuffer*>(srcBuffer);
    auto vkDstBuffer = static_cast<const VulkanBuffer*>(dstBuffer);

    SkASSERT(vkSrcBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
    SkASSERT(vkDstBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_DST_BIT);

    VkBufferCopy region;
    memset(&region, 0, sizeof(VkBufferCopy));
    region.srcOffset = srcOffset;
    region.dstOffset = dstOffset;
    region.size = size;

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyBuffer(fPrimaryCommandBuffer,
                              vkSrcBuffer->vkBuffer(),
                              vkDstBuffer->vkBuffer(),
                              /*regionCount=*/1,
                              &region));

    return true;
}

bool VulkanCommandBuffer::onCopyTextureToBuffer(const Texture* texture,
                                                SkIRect srcRect,
                                                const Buffer* buffer,
                                                size_t bufferOffset,
                                                size_t bufferRowBytes) {
    const VulkanTexture* srcTexture = static_cast<const VulkanTexture*>(texture);
    auto dstBuffer = static_cast<const VulkanBuffer*>(buffer);
    SkASSERT(dstBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_DST_BIT);

    // Obtain the VkFormat of the source texture so we can determine bytes per block.
    VulkanTextureInfo srcTextureInfo;
    texture->textureInfo().getVulkanTextureInfo(&srcTextureInfo);
    size_t bytesPerBlock = VkFormatBytesPerBlock(srcTextureInfo.fFormat);

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    // Vulkan expects bufferRowLength in texels, not bytes.
    region.bufferRowLength = (uint32_t)(bufferRowBytes/bytesPerBlock);
    region.bufferImageHeight = 0; // Tightly packed
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, /*mipLevel=*/0, 0, 1 };
    region.imageOffset = { srcRect.left(), srcRect.top(), /*z=*/0 };
    region.imageExtent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), /*depth=*/1 };

    // Enable editing of the source texture so we can change its layout so it can be copied from.
    const_cast<VulkanTexture*>(srcTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_READ_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);
    // Set current access mask for buffer
    const_cast<VulkanBuffer*>(dstBuffer)->setBufferAccess(this,
                                                          VK_ACCESS_TRANSFER_WRITE_BIT,
                                                          VK_PIPELINE_STAGE_TRANSFER_BIT);

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyImageToBuffer(fPrimaryCommandBuffer,
                                     srcTexture->vkImage(),
                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                     dstBuffer->vkBuffer(),
                                     /*regionCount=*/1,
                                     &region));
    return true;
}

bool VulkanCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
                                                const Texture* texture,
                                                const BufferTextureCopyData* copyData,
                                                int count) {
    auto srcBuffer = static_cast<const VulkanBuffer*>(buffer);
    SkASSERT(srcBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
    const VulkanTexture* dstTexture = static_cast<const VulkanTexture*>(texture);

    // Obtain the VkFormat of the destination texture so we can determine bytes per block.
    VulkanTextureInfo dstTextureInfo;
    dstTexture->textureInfo().getVulkanTextureInfo(&dstTextureInfo);
    size_t bytesPerBlock = VkFormatBytesPerBlock(dstTextureInfo.fFormat);
    SkISize oneBlockDims = CompressedDimensions(dstTexture->textureInfo().compressionType(),
                                                {1, 1});

    // Set up copy regions.
    TArray<VkBufferImageCopy> regions(count);
    for (int i = 0; i < count; ++i) {
        VkBufferImageCopy& region = regions.push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = copyData[i].fBufferOffset;
        // copyData provides row length in bytes, but Vulkan expects bufferRowLength in texels.
        // For compressed this is the number of logical pixels not the number of blocks.
        region.bufferRowLength =
                (uint32_t)((copyData[i].fBufferRowBytes/bytesPerBlock) * oneBlockDims.fWidth);
        region.bufferImageHeight = 0; // Tightly packed
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, copyData[i].fMipLevel, 0, 1 };
        region.imageOffset = { copyData[i].fRect.left(),
                               copyData[i].fRect.top(),
                               /*z=*/0 };
        region.imageExtent = { (uint32_t)copyData[i].fRect.width(),
                               (uint32_t)copyData[i].fRect.height(),
                               /*depth=*/1 };
    }

    // Enable editing of the destination texture so we can change its layout so it can be copied to.
    const_cast<VulkanTexture*>(dstTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_WRITE_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyBufferToImage(fPrimaryCommandBuffer,
                                     srcBuffer->vkBuffer(),
                                     dstTexture->vkImage(),
                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                     regions.size(),
                                     regions.begin()));
    return true;
}

bool VulkanCommandBuffer::onCopyTextureToTexture(const Texture* src,
                                                 SkIRect srcRect,
                                                 const Texture* dst,
                                                 SkIPoint dstPoint,
                                                 int mipLevel) {
    const VulkanTexture* srcTexture = static_cast<const VulkanTexture*>(src);
    const VulkanTexture* dstTexture = static_cast<const VulkanTexture*>(dst);

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, (uint32_t)mipLevel, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
    copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };

    // Enable editing of the src texture so we can change its layout so it can be copied from.
    const_cast<VulkanTexture*>(srcTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_READ_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);
    // Enable editing of the destination texture so we can change its layout so it can be copied to.
    const_cast<VulkanTexture*>(dstTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_WRITE_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyImage(fPrimaryCommandBuffer,
                             srcTexture->vkImage(),
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             dstTexture->vkImage(),
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             /*regionCount=*/1,
                             &copyRegion));

    return true;
}

bool VulkanCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) {
    static_cast<const VulkanBuffer*>(buffer)->setBufferAccess(this,
                                                              VK_ACCESS_HOST_READ_BIT,
                                                              VK_PIPELINE_STAGE_HOST_BIT);

    *outDidResultInWork = true;
    return true;
}

bool VulkanCommandBuffer::onClearBuffer(const Buffer*, size_t offset, size_t size) {
    return false;
}

void VulkanCommandBuffer::addBufferMemoryBarrier(const Resource* resource,
                                                 VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask,
                                                 VkBufferMemoryBarrier* barrier) {
    SkASSERT(resource);
    this->pipelineBarrier(resource,
                          srcStageMask,
                          dstStageMask,
                          /*byRegion=*/false,
                          kBufferMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask,
                                                 VkBufferMemoryBarrier* barrier) {
    // We don't pass in a resource here to the command buffer. The command buffer only is using it
    // to hold a ref, but every place where we add a buffer memory barrier we are doing some other
    // command with the buffer on the command buffer. Thus those other commands will already cause
    // the command buffer to be holding a ref to the buffer.
    this->pipelineBarrier(/*resource=*/nullptr,
                          srcStageMask,
                          dstStageMask,
                          /*byRegion=*/false,
                          kBufferMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::addImageMemoryBarrier(const Resource* resource,
                                                VkPipelineStageFlags srcStageMask,
                                                VkPipelineStageFlags dstStageMask,
                                                bool byRegion,
                                                VkImageMemoryBarrier* barrier) {
    SkASSERT(resource);
    this->pipelineBarrier(resource,
                          srcStageMask,
                          dstStageMask,
                          byRegion,
                          kImageMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::pipelineBarrier(const Resource* resource,
                                          VkPipelineStageFlags srcStageMask,
                                          VkPipelineStageFlags dstStageMask,
                                          bool byRegion,
                                          BarrierType barrierType,
                                          void* barrier) {
    // TODO: Do we need to handle wrapped command buffers?
    // SkASSERT(!this->isWrapped());
    SkASSERT(fActive);
#ifdef SK_DEBUG
    // For images we can have barriers inside of render passes but they require us to add more
    // support in subpasses which need self dependencies to have barriers inside them. Also, we can
    // never have buffer barriers inside of a render pass. For now we will just assert that we are
    // not in a render pass.
    bool isValidSubpassBarrier = false;
    if (barrierType == kImageMemory_BarrierType) {
        VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier);
        isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) &&
                                (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                byRegion;
    }
    SkASSERT(!fActiveRenderPass || isValidSubpassBarrier);
#endif

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check if we are adding a pipeline barrier that covers part of the same
        // subresource range as a barrier that is already in current batch. If it does, then we must
        // submit the first batch because the vulkan spec does not define a specific ordering for
        // barriers submitted in the same batch.
        // TODO: Look if we can gain anything by merging barriers together instead of submitting
        // the old ones.
        for (int i = 0; i < fImageBarriers.size(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
                if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
                    this->submitPipelineBarriers();
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
    fBarriersByRegion |= byRegion;
    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    if (resource) {
        this->trackResource(sk_ref_sp(resource));
    }
    if (fActiveRenderPass) {
        this->submitPipelineBarriers(true);
    }
}

void VulkanCommandBuffer::submitPipelineBarriers(bool forSelfDependency) {
    SkASSERT(fActive);

    // TODO: Do we need to handle SecondaryCommandBuffers as well?

    // Currently we never submit a pipeline barrier without at least one buffer or image barrier.
    if (fBufferBarriers.size() || fImageBarriers.size()) {
        // For images we can have barriers inside of render passes but they require us to add more
        // support in subpasses which need self dependencies to have barriers inside them. Also, we
        // can never have buffer barriers inside of a render pass. For now we will just assert that
        // we are not in a render pass.
        SkASSERT(!fActiveRenderPass || forSelfDependency);
        // TODO: Do we need to handle wrapped CommandBuffers?
        // SkASSERT(!this->isWrapped());
        SkASSERT(fSrcStageMask && fDstStageMask);

        VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
        VULKAN_CALL(fSharedContext->interface(),
                    CmdPipelineBarrier(fPrimaryCommandBuffer, fSrcStageMask, fDstStageMask,
                                       dependencyFlags,
                                       /*memoryBarrierCount=*/0, /*pMemoryBarrier=*/nullptr,
                                       fBufferBarriers.size(), fBufferBarriers.begin(),
                                       fImageBarriers.size(), fImageBarriers.begin()));
        fBufferBarriers.clear();
        fImageBarriers.clear();
        fBarriersByRegion = false;
        fSrcStageMask = 0;
        fDstStageMask = 0;
    }
    SkASSERT(!fBufferBarriers.size());
    SkASSERT(!fImageBarriers.size());
    SkASSERT(!fBarriersByRegion);
    SkASSERT(!fSrcStageMask);
    SkASSERT(!fDstStageMask);
}
1630
1631void VulkanCommandBuffer::updateBuffer(const VulkanBuffer* buffer,
1632 const void* data,
1633 size_t dataSize,
1634 size_t dstOffset) {
1635 // vkCmdUpdateBuffer can only be called outside of a render pass.
1636 SkASSERT(fActive && !fActiveRenderPass);
1637 if (!buffer || buffer->vkBuffer() == VK_NULL_HANDLE) {
1638 SKGPU_LOG_W("VulkanCommandBuffer::updateBuffer requires a valid VulkanBuffer pointer backed"
1639 "by a valid VkBuffer handle");
1640 return;
1641 }
1642
1643    // Per the spec, vkCmdUpdateBuffer is treated as a "transfer" operation for the purposes of
1644 // synchronization barriers. Ensure this write operation occurs after any previous read
1645 // operations and without clobbering any other write operations on the same memory in the cache.
1647 this->submitPipelineBarriers();
1648
1649 VULKAN_CALL(fSharedContext->interface(), CmdUpdateBuffer(fPrimaryCommandBuffer,
1650 buffer->vkBuffer(),
1651 dstOffset,
1652 dataSize,
1653 data));
1654}
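// Spec reminder: vkCmdUpdateBuffer only accepts small, aligned inline writes —
// dataSize must be a non-zero multiple of 4 no larger than 65536 bytes, and
// dstOffset must be a multiple of 4. A hedged guard a caller might apply before
// reaching this method (can_use_cmd_update_buffer is illustrative, not part of
// this file):
//
//   bool can_use_cmd_update_buffer(size_t dstOffset, size_t dataSize) {
//       return dataSize > 0 && dataSize <= 65536 &&
//              dataSize % 4 == 0 && dstOffset % 4 == 0;
//   }
//
// Uploads that fail this check would typically go through a staging buffer and
// vkCmdCopyBuffer instead.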
1655
1656void VulkanCommandBuffer::nextSubpass() {
1657 // TODO: Use VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS if we add secondary cmd buffers
1658 VULKAN_CALL(fSharedContext->interface(),
1659 CmdNextSubpass(fPrimaryCommandBuffer, VK_SUBPASS_CONTENTS_INLINE));
1660}
1661
1662void VulkanCommandBuffer::setViewport(const SkRect& viewport) {
1663 VkViewport vkViewport = {
1664 viewport.fLeft,
1665 viewport.fTop,
1666 viewport.width(),
1667 viewport.height(),
1668 0.0f, // minDepth
1669 1.0f, // maxDepth
1670 };
1671 VULKAN_CALL(fSharedContext->interface(),
1672 CmdSetViewport(fPrimaryCommandBuffer,
1673 /*firstViewport=*/0,
1674 /*viewportCount=*/1,
1675 &vkViewport));
1676}
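// Note: this maps the SkRect directly onto a VkViewport, with the depth range
// fixed at [0.0, 1.0]. An illustrative mapping (the 800x600 values are
// placeholders, not taken from this file):
//
//   SkRect viewport = SkRect::MakeXYWH(0, 0, 800, 600);
//   // -> VkViewport{ x=0, y=0, width=800, height=600, minDepth=0.0, maxDepth=1.0 }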
1677
1678} // namespace skgpu::graphite