GrVkCommandBuffer.cpp
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"

#include "include/core/SkRect.h"
#include "src/gpu/ganesh/vk/GrVkBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandPool.h"
#include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkImage.h"
#include "src/gpu/ganesh/vk/GrVkPipeline.h"
#include "src/gpu/ganesh/vk/GrVkRenderPass.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

using namespace skia_private;

void GrVkCommandBuffer::invalidateState() {
    for (auto& boundInputBuffer : fBoundInputBuffers) {
        boundInputBuffer = VK_NULL_HANDLE;
    }
    fBoundIndexBuffer = VK_NULL_HANDLE;

    memset(&fCachedViewport, 0, sizeof(VkViewport));
    fCachedViewport.width = -1.0f; // Viewport must have a width greater than 0

    memset(&fCachedScissor, 0, sizeof(VkRect2D));
    fCachedScissor.offset.x = -1; // Scissor offset must be non-negative to be valid

    for (int i = 0; i < 4; ++i) {
        fCachedBlendConstant[i] = -1.0;
    }
}

void GrVkCommandBuffer::freeGPUData(const GrGpu* gpu, VkCommandPool cmdPool) const {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(cmdPool != VK_NULL_HANDLE);
    SkASSERT(!this->isWrapped());

    const GrVkGpu* vkGpu = (const GrVkGpu*)gpu;
    GR_VK_CALL(vkGpu->vkInterface(), FreeCommandBuffers(vkGpu->device(), cmdPool, 1, &fCmdBuffer));

    this->onFreeGPUData(vkGpu);
}

void GrVkCommandBuffer::releaseResources() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(!fIsActive || this->isWrapped());
    fTrackedResources.clear();
    fTrackedRecycledResources.clear();

    fTrackedGpuBuffers.clear();
    fTrackedGpuSurfaces.clear();

    this->invalidateState();

    this->onReleaseResources();
}

////////////////////////////////////////////////////////////////////////////////
// CommandBuffer commands
////////////////////////////////////////////////////////////////////////////////

void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                        const GrManagedResource* resource,
                                        VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask,
                                        bool byRegion,
                                        BarrierType barrierType,
                                        void* barrier) {
    SkASSERT(!this->isWrapped());
    SkASSERT(fIsActive);
#ifdef SK_DEBUG
    // For images we can have barriers inside of render passes but they require us to add more
    // support in subpasses which need self dependencies to have barriers inside them. Also, we can
    // never have buffer barriers inside of a render pass. For now we will just assert that we are
    // not in a render pass.
    bool isValidSubpassBarrier = false;
    if (barrierType == kImageMemory_BarrierType) {
        VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier);
        isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) &&
                                (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                byRegion;
    }
    SkASSERT(!fActiveRenderPass || isValidSubpassBarrier);
#endif

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check if we are adding a pipeline barrier that covers part of the same
        // subresource range as a barrier that is already in the current batch. If it does, then we
        // must submit the first batch because the vulkan spec does not define a specific ordering
        // for barriers submitted in the same batch.
        // TODO: Look if we can gain anything by merging barriers together instead of submitting
        // the old ones.
        for (int i = 0; i < fImageBarriers.size(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
                if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
                    this->submitPipelineBarriers(gpu);
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
    fBarriersByRegion |= byRegion;
    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    fHasWork = true;
    if (resource) {
        this->addResource(resource);
    }
    if (fActiveRenderPass) {
        this->submitPipelineBarriers(gpu, true);
    }
}

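// The overlap test above is the standard closed-interval intersection check: two
// mip ranges [newStart, newEnd] and [oldStart, oldEnd] overlap exactly when
// max(newStart, oldStart) <= min(newEnd, oldEnd). Illustrative sketch (not part
// of this file; values hypothetical):
//
//     // Barrier A covers mips [0, 3] of an image, barrier B covers mips [3, 5]
//     // of the same image: max(3, 0) == 3 <= min(5, 3) == 3, so they overlap on
//     // mip 3 and the pending batch must be flushed before B is queued.
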
void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency) {
    SkASSERT(fIsActive);

    // Currently we never submit a pipeline barrier without at least one memory barrier.
    if (!fBufferBarriers.empty() || !fImageBarriers.empty()) {
        // For images we can have barriers inside of render passes but they require us to add more
        // support in subpasses which need self dependencies to have barriers inside them. Also, we
        // can never have buffer barriers inside of a render pass. For now we will just assert that
        // we are not in a render pass.
        SkASSERT(!fActiveRenderPass || forSelfDependency);
        SkASSERT(!this->isWrapped());

        // TODO(https://crbug.com/1469231): The linked bug references a crash report from calling
        // CmdPipelineBarrier. The checks below were added to ensure that we are passing in buffer
        // counts >= 0, and in the case of >0, that the buffers are non-null. Evaluate whether this
        // change leads to a reduction in crash instances. If not, the issue may lie within the
        // driver itself and these checks can be removed.
        if (!fBufferBarriers.empty() && fBufferBarriers.begin() == nullptr) {
            fBufferBarriers.clear(); // Sets the size to 0
        }
        if (!fImageBarriers.empty() && fImageBarriers.begin() == nullptr) {
            fImageBarriers.clear(); // Sets the size to 0
        }

        VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
        GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(
                fCmdBuffer, fSrcStageMask, fDstStageMask, dependencyFlags, 0, nullptr,
                fBufferBarriers.size(), fBufferBarriers.begin(),
                fImageBarriers.size(), fImageBarriers.begin()));
        fBufferBarriers.clear();
        fImageBarriers.clear();
        fBarriersByRegion = false;
        fSrcStageMask = 0;
        fDstStageMask = 0;
    }
}

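// All barriers batched since the last flush collapse into a single
// vkCmdPipelineBarrier call whose stage masks are the OR of every batched
// barrier's masks, trading barrier precision for fewer API calls. For example
// (illustrative), batching one barrier with srcStageMask
// VK_PIPELINE_STAGE_TRANSFER_BIT and another with
// VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT records one call whose srcStageMask is
// the OR of the two.
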
void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
                                        sk_sp<const GrBuffer> buffer) {
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    SkASSERT(binding < kMaxInputBuffers);
    // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundInputBuffers[binding]) {
        VkDeviceSize offset = 0;
        GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
                                                            binding,
                                                            1,
                                                            &vkBuffer,
                                                            &offset));
        fBoundInputBuffers[binding] = vkBuffer;
        this->addGrBuffer(std::move(buffer));
    }
}

void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, sk_sp<const GrBuffer> buffer) {
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundIndexBuffer) {
        GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
                                                          vkBuffer, /*offset=*/0,
                                                          VK_INDEX_TYPE_UINT16));
        fBoundIndexBuffer = vkBuffer;
        this->addGrBuffer(std::move(buffer));
    }
}

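// Both bind paths elide redundant binds: the cached VkBuffer handle is compared
// first, and the vkCmdBind* call is recorded only when the handle changes.
// Illustrative sequence (hypothetical caller, not part of this file):
//
//     cmdBuffer->bindIndexBuffer(gpu, quadIndexBuffer); // records the bind
//     cmdBuffer->bindIndexBuffer(gpu, quadIndexBuffer); // no-op, handle unchanged
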
void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
                                         int numAttachments,
                                         const VkClearAttachment* attachments,
                                         int numRects,
                                         const VkClearRect* clearRects) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(numAttachments > 0);
    SkASSERT(numRects > 0);

    this->addingWork(gpu);

#ifdef SK_DEBUG
    for (int i = 0; i < numAttachments; ++i) {
        if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
            uint32_t testIndex;
            SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
            SkASSERT(testIndex == attachments[i].colorAttachment);
        }
    }
#endif
    GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
                                                       numAttachments,
                                                       attachments,
                                                       numRects,
                                                       clearRects));
    if (gpu->vkCaps().mustInvalidatePrimaryCmdBufferStateAfterClearAttachments()) {
        this->invalidateState();
    }
}

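// Unlike vkCmdClearColorImage, vkCmdClearAttachments is legal only inside a
// render pass, hence the fActiveRenderPass assert above. A minimal sketch of
// the inputs (illustrative only; width/height are hypothetical):
//
//     VkClearAttachment att = {};
//     att.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
//     att.colorAttachment = 0;                      // index within the subpass
//     att.clearValue.color = {{0.f, 0.f, 0.f, 1.f}};
//
//     VkClearRect rect = {};
//     rect.rect = {{0, 0}, {width, height}};        // region to clear
//     rect.baseArrayLayer = 0;
//     rect.layerCount = 1;
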
void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           VkPipelineLayout layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout,
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
}

void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, sk_sp<const GrVkPipeline> pipeline) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
                                                   VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                   pipeline->pipeline()));
    this->addResource(std::move(pipeline));
}

void GrVkCommandBuffer::pushConstants(const GrVkGpu* gpu, VkPipelineLayout layout,
                                      VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                      const void* values) {
    SkASSERT(fIsActive);
    // offset and size must be a multiple of 4
    SkASSERT(!SkToBool(offset & 0x3));
    SkASSERT(!SkToBool(size & 0x3));
    GR_VK_CALL(gpu->vkInterface(), CmdPushConstants(fCmdBuffer,
                                                    layout,
                                                    stageFlags,
                                                    offset,
                                                    size,
                                                    values));
}

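// The alignment asserts mirror vkCmdPushConstants' valid-usage rules: offset
// and size must each be a multiple of 4, and offset + size must stay within
// VkPhysicalDeviceLimits::maxPushConstantsSize (guaranteed to be at least 128
// bytes). For example, a 16-byte vec4 pushed at offset 0 passes both checks.
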
void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
                                    uint32_t indexCount,
                                    uint32_t instanceCount,
                                    uint32_t firstIndex,
                                    int32_t vertexOffset,
                                    uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
                                                  indexCount,
                                                  instanceCount,
                                                  firstIndex,
                                                  vertexOffset,
                                                  firstInstance));
}

void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
                             uint32_t vertexCount,
                             uint32_t instanceCount,
                             uint32_t firstVertex,
                             uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
                                           vertexCount,
                                           instanceCount,
                                           firstVertex,
                                           firstInstance));
}

void GrVkCommandBuffer::drawIndirect(const GrVkGpu* gpu,
                                     sk_sp<const GrBuffer> indirectBuffer,
                                     VkDeviceSize offset,
                                     uint32_t drawCount,
                                     uint32_t stride) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(!indirectBuffer->isCpuBuffer());
    this->addingWork(gpu);
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndirect(fCmdBuffer,
                                                   vkBuffer,
                                                   offset,
                                                   drawCount,
                                                   stride));
    this->addGrBuffer(std::move(indirectBuffer));
}

void GrVkCommandBuffer::drawIndexedIndirect(const GrVkGpu* gpu,
                                            sk_sp<const GrBuffer> indirectBuffer,
                                            VkDeviceSize offset,
                                            uint32_t drawCount,
                                            uint32_t stride) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(!indirectBuffer->isCpuBuffer());
    this->addingWork(gpu);
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexedIndirect(fCmdBuffer,
                                                          vkBuffer,
                                                          offset,
                                                          drawCount,
                                                          stride));
    this->addGrBuffer(std::move(indirectBuffer));
}

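// For the indirect draws the GPU reads tightly packed command structs from
// indirectBuffer starting at offset, stepping by stride. The layouts are fixed
// by Vulkan (shown for reference):
//
//     struct VkDrawIndirectCommand {
//         uint32_t vertexCount, instanceCount, firstVertex, firstInstance;
//     };
//     struct VkDrawIndexedIndirectCommand {
//         uint32_t indexCount, instanceCount, firstIndex;
//         int32_t  vertexOffset;
//         uint32_t firstInstance;
//     };
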
void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
                                    uint32_t firstViewport,
                                    uint32_t viewportCount,
                                    const VkViewport* viewports) {
    SkASSERT(fIsActive);
    SkASSERT(1 == viewportCount);
    if (0 != memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
                                                      firstViewport,
                                                      viewportCount,
                                                      viewports));
        fCachedViewport = viewports[0];
    }
}

void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
                                   uint32_t firstScissor,
                                   uint32_t scissorCount,
                                   const VkRect2D* scissors) {
    SkASSERT(fIsActive);
    SkASSERT(1 == scissorCount);
    if (0 != memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
                                                     firstScissor,
                                                     scissorCount,
                                                     scissors));
        fCachedScissor = scissors[0];
    }
}

void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
                                          const float blendConstants[4]) {
    SkASSERT(fIsActive);
    if (0 != memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
    }
}

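// These three setters rely on the sentinels written by invalidateState(): a
// viewport width of -1, a scissor offset of -1, and blend constants of -1 can
// never match real values, so the first set* call after invalidation always
// records the command. Illustrative sequence (hypothetical caller):
//
//     cmdBuffer->setViewport(gpu, 0, 1, &vp); // recorded: cache was invalid
//     cmdBuffer->setViewport(gpu, 0, 1, &vp); // skipped: memcmp matches cache
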
void GrVkCommandBuffer::addingWork(const GrVkGpu* gpu) {
    this->submitPipelineBarriers(gpu);
    fHasWork = true;
}

///////////////////////////////////////////////////////////////////////////////
// PrimaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////
GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
    // Should have ended any render pass we're in the middle of
    SkASSERT(!fActiveRenderPass);
}

GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(GrVkGpu* gpu,
                                                           VkCommandPool cmdPool) {
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool,                                          // commandPool
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkPrimaryCommandBuffer(cmdBuffer);
}

void GrVkPrimaryCommandBuffer::begin(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));
    fIsActive = true;
}

void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu, bool abandoningBuffer) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    // If we are in the process of abandoning the context then the GrResourceCache will have freed
    // all resources before destroying the GrVkGpu. When we destroy the GrVkGpu we call end on the
    // command buffer to keep all our state tracking consistent. However, the vulkan validation
    // layers complain about calling end on a command buffer that contains resources that have
    // already been deleted. From the vulkan API it isn't required to end the command buffer to
    // delete it, so we just skip the vulkan API calls and update our own state tracking.
    if (!abandoningBuffer) {
        this->submitPipelineBarriers(gpu);

        GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
    }
    this->invalidateState();
    fIsActive = false;
    fHasWork = false;
}

bool GrVkPrimaryCommandBuffer::beginRenderPass(GrVkGpu* gpu,
                                               const GrVkRenderPass* renderPass,
                                               sk_sp<const GrVkFramebuffer> framebuffer,
                                               const VkClearValue clearValues[],
                                               const GrSurface* target,
                                               const SkIRect& bounds,
                                               bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    SkASSERT(framebuffer);

    this->addingWork(gpu);

    VkRenderPassBeginInfo beginInfo;
    VkRect2D renderArea;
    renderArea.offset = { bounds.fLeft , bounds.fTop };
    renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };

    memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.pNext = nullptr;
    beginInfo.renderPass = renderPass->vkRenderPass();
    beginInfo.framebuffer = framebuffer->framebuffer();
    beginInfo.renderArea = renderArea;
    beginInfo.clearValueCount = renderPass->clearValueCount();
    beginInfo.pClearValues = clearValues;

    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;

    GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
    fActiveRenderPass = renderPass;
    this->addResource(renderPass);
    this->addResource(std::move(framebuffer));
    this->addGrSurface(sk_ref_sp(target));
    return true;
}

void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
    fActiveRenderPass = nullptr;
}

void GrVkPrimaryCommandBuffer::nexSubpass(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;
    GR_VK_CALL(gpu->vkInterface(), CmdNextSubpass(fCmdBuffer, contents));
}

void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
                                               std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    // The Vulkan spec allows secondary command buffers to be executed on a primary command buffer
    // if the command pools they were created from were created with the same queue family.
    // However, we currently always create them from the same pool.
    SkASSERT(fIsActive);
    SkASSERT(!buffer->fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));

    this->addingWork(gpu);

    GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
    fSecondaryCommandBuffers.push_back(std::move(buffer));
    // When executing a secondary command buffer all state (besides render pass state) becomes
    // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
    this->invalidateState();
}

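// A secondary buffer recorded with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT
// (see GrVkSecondaryCommandBuffer::begin below) may only run inside a render pass
// compatible with the one it was recorded against, which is what the isCompatible
// assert checks. Illustrative sequence (hypothetical caller):
//
//     primary->beginRenderPass(gpu, rp, fb, clears, target, bounds,
//                              /*forSecondaryCB=*/true);
//     primary->executeCommands(gpu, std::move(secondary));
//     primary->endRenderPass(gpu);
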
static bool submit_to_queue(GrVkGpu* gpu,
                            VkQueue queue,
                            VkFence fence,
                            uint32_t waitCount,
                            const VkSemaphore* waitSemaphores,
                            const VkPipelineStageFlags* waitStages,
                            uint32_t commandBufferCount,
                            const VkCommandBuffer* commandBuffers,
                            uint32_t signalCount,
                            const VkSemaphore* signalSemaphores,
                            GrProtected protectedContext) {
    VkProtectedSubmitInfo protectedSubmitInfo;
    if (protectedContext == GrProtected::kYes) {
        memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
        protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
        protectedSubmitInfo.pNext = nullptr;
        protectedSubmitInfo.protectedSubmit = VK_TRUE;
    }

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
    submitInfo.waitSemaphoreCount = waitCount;
    submitInfo.pWaitSemaphores = waitSemaphores;
    submitInfo.pWaitDstStageMask = waitStages;
    submitInfo.commandBufferCount = commandBufferCount;
    submitInfo.pCommandBuffers = commandBuffers;
    submitInfo.signalSemaphoreCount = signalCount;
    submitInfo.pSignalSemaphores = signalSemaphores;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, QueueSubmit(queue, 1, &submitInfo, fence));
    return result == VK_SUCCESS;
}

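// VkProtectedSubmitInfo extends VkSubmitInfo via the pNext chain: for a
// protected-context submit the struct is linked in with protectedSubmit set to
// VK_TRUE, otherwise pNext stays null. Each wait semaphore is paired, index for
// index, with a stage mask in waitStages telling the driver which pipeline
// stages must block on that semaphore. Illustrative call (hypothetical values):
//
//     bool ok = submit_to_queue(gpu, queue, fence,
//                               /*waitCount=*/1, &waitSem, &waitStage,
//                               /*commandBufferCount=*/1, &cmdBuf,
//                               /*signalCount=*/1, &signalSem,
//                               GrProtected::kNo);
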
bool GrVkPrimaryCommandBuffer::submitToQueue(
        GrVkGpu* gpu,
        VkQueue queue,
        TArray<GrVkSemaphore::Resource*>& signalSemaphores,
        TArray<GrVkSemaphore::Resource*>& waitSemaphores) {
    SkASSERT(!fIsActive);

    VkResult err;
    if (VK_NULL_HANDLE == fSubmitFence) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        GR_VK_CALL_RESULT(gpu, err, CreateFence(gpu->device(), &fenceInfo, nullptr,
                                                &fSubmitFence));
        if (err) {
            fSubmitFence = VK_NULL_HANDLE;
            return false;
        }
    } else {
        // This cannot return DEVICE_LOST so we assert we succeeded.
        GR_VK_CALL_RESULT(gpu, err, ResetFences(gpu->device(), 1, &fSubmitFence));
        SkASSERT(err == VK_SUCCESS);
    }

    int signalCount = signalSemaphores.size();
    int waitCount = waitSemaphores.size();

    bool submitted = false;

    if (0 == signalCount && 0 == waitCount) {
        // This command buffer has no dependent semaphores so we can simply just submit it to the
        // queue with no worries.
        submitted = submit_to_queue(
                gpu, queue, fSubmitFence, 0, nullptr, nullptr, 1, &fCmdBuffer, 0, nullptr,
                gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
    } else {
        TArray<VkSemaphore> vkSignalSems(signalCount);
        for (int i = 0; i < signalCount; ++i) {
            if (signalSemaphores[i]->shouldSignal()) {
                this->addResource(signalSemaphores[i]);
                vkSignalSems.push_back(signalSemaphores[i]->semaphore());
            }
        }

        TArray<VkSemaphore> vkWaitSems(waitCount);
        TArray<VkPipelineStageFlags> vkWaitStages(waitCount);
        for (int i = 0; i < waitCount; ++i) {
            if (waitSemaphores[i]->shouldWait()) {
                this->addResource(waitSemaphores[i]);
                vkWaitSems.push_back(waitSemaphores[i]->semaphore());
                // We only block the fragment stage since client provided resources are not used
                // before the fragment stage. This allows the driver to begin vertex work while
                // waiting on the semaphore. We also add in the transfer stage for uses of clients
                // calling read or write pixels.
                vkWaitStages.push_back(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                       VK_PIPELINE_STAGE_TRANSFER_BIT);
            }
        }
        submitted = submit_to_queue(gpu, queue, fSubmitFence, vkWaitSems.size(),
                                    vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
                                    vkSignalSems.size(), vkSignalSems.begin(),
                                    gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
        if (submitted) {
            for (int i = 0; i < signalCount; ++i) {
                signalSemaphores[i]->markAsSignaled();
            }
            for (int i = 0; i < waitCount; ++i) {
                waitSemaphores[i]->markAsWaited();
            }
        }
    }

    if (!submitted) {
        // Destroy the fence or else we will try to wait forever for it to finish.
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
        return false;
    }
    return true;
}

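// The submit fence is created lazily on the first submit and then recycled with
// vkResetFences on every later submit, so steady-state submission performs no
// fence allocation. Illustrative lifecycle (hypothetical caller):
//
//     cb->submitToQueue(gpu, queue, signals, waits); // creates fSubmitFence
//     cb->forceSync(gpu);                            // blocks on the fence
//     cb->submitToQueue(gpu, queue, signals, waits); // resets and reuses it
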
void GrVkPrimaryCommandBuffer::forceSync(GrVkGpu* gpu) {
    if (fSubmitFence == VK_NULL_HANDLE) {
        return;
    }
    GR_VK_CALL_ERRCHECK(gpu, WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
}

bool GrVkPrimaryCommandBuffer::finished(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    VkResult err;
    GR_VK_CALL_RESULT_NOCHECK(gpu, err, GetFenceStatus(gpu->device(), fSubmitFence));
    switch (err) {
        case VK_SUCCESS:
        case VK_ERROR_DEVICE_LOST:
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SkDebugf("Error getting fence status: %d\n", err);
            SK_ABORT("Got an invalid fence status");
            return false;
    }
}

void GrVkPrimaryCommandBuffer::addFinishedProc(sk_sp<skgpu::RefCntedCallback> finishedProc) {
    fFinishedProcs.push_back(std::move(finishedProc));
}

void GrVkPrimaryCommandBuffer::onReleaseResources() {
    for (int i = 0; i < fSecondaryCommandBuffers.size(); ++i) {
        fSecondaryCommandBuffers[i]->releaseResources();
    }
    this->callFinishedProcs();
}

void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool) {
    for (int i = 0; i < fSecondaryCommandBuffers.size(); ++i) {
        fSecondaryCommandBuffers[i].release()->recycle(cmdPool);
    }
    fSecondaryCommandBuffers.clear();
}

void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
                                         GrVkImage* srcImage,
                                         VkImageLayout srcLayout,
                                         GrVkImage* dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t copyRegionCount,
                                         const VkImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcImage->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
                                                srcImage->image(),
                                                srcLayout,
                                                dstImage->image(),
                                                dstLayout,
                                                copyRegionCount,
                                                copyRegions));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrManagedResource* srcResource,
                                         VkImage srcImage,
                                         VkImageLayout srcLayout,
                                         const GrManagedResource* dstResource,
                                         VkImage dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcResource);
    this->addResource(dstResource);
    GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
                                                srcImage,
                                                srcLayout,
                                                dstImage,
                                                dstLayout,
                                                blitRegionCount,
                                                blitRegions,
                                                filter));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrVkImage& srcImage,
                                         const GrVkImage& dstImage,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    this->blitImage(gpu,
                    srcImage.resource(),
                    srcImage.image(),
                    srcImage.currentLayout(),
                    dstImage.resource(),
                    dstImage.image(),
                    dstImage.currentLayout(),
                    blitRegionCount,
                    blitRegions,
                    filter);
}

void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                                 GrVkImage* srcImage,
                                                 VkImageLayout srcLayout,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(dstBuffer.get());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
                                                        srcImage->image(),
                                                        srcLayout,
                                                        vkBuffer->vkBuffer(),
                                                        copyRegionCount,
                                                        copyRegions));
    this->addResource(srcImage->resource());
    this->addGrBuffer(std::move(dstBuffer));
}

void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
                                                 VkBuffer srcBuffer,
                                                 GrVkImage* dstImage,
                                                 VkImageLayout dstLayout,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);

    GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
                                                        srcBuffer,
                                                        dstImage->image(),
                                                        dstLayout,
                                                        copyRegionCount,
                                                        copyRegions));
    this->addResource(dstImage->resource());
}

void GrVkPrimaryCommandBuffer::fillBuffer(GrVkGpu* gpu,
                                          sk_sp<GrGpuBuffer> buffer,
                                          VkDeviceSize offset,
                                          VkDeviceSize size,
                                          uint32_t data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);

    const GrVkBuffer* bufferVk = static_cast<GrVkBuffer*>(buffer.get());

    GR_VK_CALL(gpu->vkInterface(), CmdFillBuffer(fCmdBuffer,
                                                 bufferVk->vkBuffer(),
                                                 offset,
                                                 size,
                                                 data));
    this->addGrBuffer(std::move(buffer));
}

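// vkCmdFillBuffer writes the 32-bit `data` word repeatedly over the given range;
// Vulkan requires offset to be a multiple of 4 and size to be either a multiple
// of 4 or VK_WHOLE_SIZE. Illustrative zero-fill (hypothetical values):
//
//     cb->fillBuffer(gpu, scratchBuffer, /*offset=*/0, /*size=*/256, /*data=*/0);
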
void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
                                          sk_sp<GrGpuBuffer> srcBuffer,
                                          sk_sp<GrGpuBuffer> dstBuffer,
                                          uint32_t regionCount,
                                          const VkBufferCopy* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
#ifdef SK_DEBUG
    for (uint32_t i = 0; i < regionCount; ++i) {
        const VkBufferCopy& region = regions[i];
        SkASSERT(region.size > 0);
        SkASSERT(region.srcOffset < srcBuffer->size());
        SkASSERT(region.dstOffset < dstBuffer->size());
        SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
        SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
    }
#endif

    const GrVkBuffer* srcVk = static_cast<GrVkBuffer*>(srcBuffer.get());
    const GrVkBuffer* dstVk = static_cast<GrVkBuffer*>(dstBuffer.get());

    GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
                                                 srcVk->vkBuffer(),
                                                 dstVk->vkBuffer(),
                                                 regionCount,
                                                 regions));
    this->addGrBuffer(std::move(srcBuffer));
    this->addGrBuffer(std::move(dstBuffer));
}

void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
                                            sk_sp<GrVkBuffer> dstBuffer,
                                            VkDeviceSize dstOffset,
                                            VkDeviceSize dataSize,
                                            const void* data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(0 == (dstOffset & 0x03));  // four byte aligned
    // TODO: handle larger transfer sizes
    SkASSERT(dataSize <= 65536);
    SkASSERT(0 == (dataSize & 0x03));  // four byte aligned
    this->addingWork(gpu);
    GR_VK_CALL(
            gpu->vkInterface(),
            CmdUpdateBuffer(
                    fCmdBuffer, dstBuffer->vkBuffer(), dstOffset, dataSize, (const uint32_t*)data));
    this->addGrBuffer(std::move(dstBuffer));
}

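// The 65536-byte cap mirrors Vulkan's limit for vkCmdUpdateBuffer, which is
// intended for small inline updates recorded directly into the command buffer.
// Larger uploads would typically go through a staging buffer plus copyBuffer()
// instead (a general Vulkan pattern, not a claim about how Skia routes them).
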
void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
                                               GrVkImage* image,
                                               const VkClearColorValue* color,
                                               uint32_t subRangeCount,
                                               const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
                                                      image->image(),
                                                      image->currentLayout(),
                                                      color,
                                                      subRangeCount,
                                                      subRanges));
}

void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
                                                      GrVkImage* image,
                                                      const VkClearDepthStencilValue* color,
                                                      uint32_t subRangeCount,
                                                      const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
                                                             image->image(),
                                                             image->currentLayout(),
                                                             color,
                                                             subRangeCount,
                                                             subRanges));
}

void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
                                            const GrVkImage& srcImage,
                                            const GrVkImage& dstImage,
                                            uint32_t regionCount,
                                            const VkImageResolve* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->addingWork(gpu);
    this->addResource(srcImage.resource());
    this->addResource(dstImage.resource());

    GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
                                                   srcImage.image(),
                                                   srcImage.currentLayout(),
                                                   dstImage.image(),
                                                   dstImage.currentLayout(),
                                                   regionCount,
                                                   regions));
}

void GrVkPrimaryCommandBuffer::onFreeGPUData(const GrVkGpu* gpu) const {
    // Destroy the fence, if any
    if (VK_NULL_HANDLE != fSubmitFence) {
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
    }
    SkASSERT(fSecondaryCommandBuffers.empty());
}

///////////////////////////////////////////////////////////////////////////////
// SecondaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(GrVkGpu* gpu,
                                                               GrVkCommandPool* cmdPool) {
    SkASSERT(cmdPool);
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool->vkCommandPool(),                         // commandPool
        VK_COMMAND_BUFFER_LEVEL_SECONDARY,                // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkSecondaryCommandBuffer(cmdBuffer, /*externalRenderPass=*/nullptr);
}

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(
        VkCommandBuffer cmdBuffer, const GrVkRenderPass* externalRenderPass) {
    return new GrVkSecondaryCommandBuffer(cmdBuffer, externalRenderPass);
}

void GrVkSecondaryCommandBuffer::begin(GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
                                       const GrVkRenderPass* compatibleRenderPass) {
    SkASSERT(!fIsActive);
    SkASSERT(!this->isWrapped());
    SkASSERT(compatibleRenderPass);
    fActiveRenderPass = compatibleRenderPass;

    VkCommandBufferInheritanceInfo inheritanceInfo;
    memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
    inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    inheritanceInfo.pNext = nullptr;
    inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
    inheritanceInfo.subpass = 0; // Currently only using 1 subpass for each render pass
    inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
    inheritanceInfo.occlusionQueryEnable = false;
    inheritanceInfo.queryFlags = 0;
    inheritanceInfo.pipelineStatistics = 0;

    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT |
                               VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;

    GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));

    fIsActive = true;
}

void GrVkSecondaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(!this->isWrapped());
    GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
    this->invalidateState();
    fHasWork = false;
    fIsActive = false;
}

void GrVkSecondaryCommandBuffer::recycle(GrVkCommandPool* cmdPool) {
    if (this->isWrapped()) {
        delete this;
    } else {
        cmdPool->recycleSecondaryCommandBuffer(this);
    }
}