38 for (int i = 0; i < 4; ++i) {
91 bool isValidSubpassBarrier = false;
116 if (barrierPtr->image == currentBarrier.image) {
192 VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
210 VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
236 for (int i = 0; i < numAttachments; ++i) {
240 SkASSERT(testIndex == attachments[i].colorAttachment);
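The fragments above (lines 38-240) come from the barrier-batching, buffer-binding, and clearAttachments() code of GrVkCommandBuffer. As a point of reference only, a minimal sketch of the raw vkCmdPipelineBarrier call that submitPipelineBarriers() ultimately issues could look like the following; cmdBuf, image, and the access/stage masks are illustrative assumptions, not Skia's actual arguments:

    #include <vulkan/vulkan.h>

    // Sketch: record a color-image layout transition. Skia accumulates such
    // barriers in fImageBarriers/fBufferBarriers and flushes them later in
    // submitPipelineBarriers(); this shows the underlying command directly.
    void recordImageBarrier(VkCommandBuffer cmdBuf, VkImage image) {
        VkImageMemoryBarrier imageBarrier = {};
        imageBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        imageBarrier.pNext = nullptr;
        imageBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        imageBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
        imageBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
        imageBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        imageBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        imageBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        imageBarrier.image = image;
        imageBarrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};

        vkCmdPipelineBarrier(cmdBuf,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,         // srcStageMask
                             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,  // dstStageMask
                             VK_DEPENDENCY_BY_REGION_BIT,            // "byRegion" dependency
                             0, nullptr,                             // global memory barriers
                             0, nullptr,                             // buffer barriers
                             1, &imageBarrier);                      // image barriers
    }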
255 VkPipelineLayout layout,
258 const VkDescriptorSet* descriptorSets,
259 uint32_t dynamicOffsetCount,
260 const uint32_t* dynamicOffsets) {
297 uint32_t instanceCount,
299 int32_t vertexOffset,
300 uint32_t firstInstance) {
313 uint32_t vertexCount,
314 uint32_t instanceCount,
315 uint32_t firstVertex,
316 uint32_t firstInstance) {
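Lines 255-316 are the parameter lists of bindDescriptorSets(), drawIndexed(), and draw(). A sketch of the corresponding raw Vulkan commands, assuming a graphics pipeline layout and already-updated descriptor sets; all handles, counts, and offsets are placeholders:

    #include <vulkan/vulkan.h>

    // Sketch: bind descriptor sets for a graphics pipeline, then issue an
    // indexed draw. 'firstSet', the dynamic offsets, and the draw parameters
    // are illustrative placeholders.
    void recordDescriptorsAndDraw(VkCommandBuffer cmdBuf,
                                  VkPipelineLayout layout,
                                  uint32_t setCount, const VkDescriptorSet* sets,
                                  uint32_t dynamicOffsetCount,
                                  const uint32_t* dynamicOffsets,
                                  uint32_t indexCount, uint32_t instanceCount) {
        vkCmdBindDescriptorSets(cmdBuf, VK_PIPELINE_BIND_POINT_GRAPHICS, layout,
                                /*firstSet=*/0, setCount, sets,
                                dynamicOffsetCount, dynamicOffsets);
        vkCmdDrawIndexed(cmdBuf, indexCount, instanceCount,
                         /*firstIndex=*/0, /*vertexOffset=*/0, /*firstInstance=*/0);
    }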
336 VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
354 VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
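Lines 192-354 all unwrap a GrBuffer into its VkBuffer handle before binding or drawing. A hedged sketch of the raw Vulkan calls that bindInputBuffer(), bindIndexBuffer(), and drawIndexedIndirect() wrap; the binding index, offsets, and index type are placeholders:

    #include <vulkan/vulkan.h>

    // Sketch: bind one vertex buffer and an index buffer, then issue an
    // indirect indexed draw. The handles are assumed valid; binding 0 and
    // offset 0 are illustrative.
    void recordIndexedIndirectDraw(VkCommandBuffer cmdBuf,
                                   VkBuffer vertexBuffer,
                                   VkBuffer indexBuffer,
                                   VkBuffer indirectBuffer,
                                   uint32_t drawCount) {
        VkDeviceSize offset = 0;
        vkCmdBindVertexBuffers(cmdBuf, /*firstBinding=*/0, /*bindingCount=*/1,
                               &vertexBuffer, &offset);
        vkCmdBindIndexBuffer(cmdBuf, indexBuffer, /*offset=*/0, VK_INDEX_TYPE_UINT16);
        vkCmdDrawIndexedIndirect(cmdBuf, indirectBuffer, /*offset=*/0,
                                 drawCount, sizeof(VkDrawIndexedIndirectCommand));
    }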
364 uint32_t firstViewport,
365 uint32_t viewportCount,
379 uint32_t firstScissor,
380 uint32_t scissorCount,
394 const float blendConstants[4]) {
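Lines 364-394 are the dynamic-state setters (setViewport(), setScissor(), setBlendConstants()), which the class caches (see fCachedViewport and fCachedBlendConstant in the reference list below) to skip redundant calls. The underlying commands, sketched with placeholder values for a 1920x1080 target:

    #include <vulkan/vulkan.h>

    // Sketch: set dynamic viewport, scissor, and blend-constant state.
    void recordDynamicState(VkCommandBuffer cmdBuf) {
        VkViewport viewport = {0.0f, 0.0f, 1920.0f, 1080.0f, 0.0f, 1.0f};
        vkCmdSetViewport(cmdBuf, /*firstViewport=*/0, /*viewportCount=*/1, &viewport);

        VkRect2D scissor = {{0, 0}, {1920, 1080}};
        vkCmdSetScissor(cmdBuf, /*firstScissor=*/0, /*scissorCount=*/1, &scissor);

        const float blendConstants[4] = {1.0f, 1.0f, 1.0f, 1.0f};
        vkCmdSetBlendConstants(cmdBuf, blendConstants);
    }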
416 VkCommandPool cmdPool) {
425 VkCommandBuffer cmdBuffer;
439 cmdBufferBeginInfo.pNext = nullptr;
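Lines 416-439 are from GrVkPrimaryCommandBuffer::Create() and begin(). A minimal sketch of the equivalent raw Vulkan sequence, assuming a valid VkDevice and VkCommandPool and dropping Skia's GR_VK_CALL error-handling wrappers:

    #include <vulkan/vulkan.h>

    // Sketch: allocate one primary command buffer from 'pool' and begin
    // recording it for a single submission. Error handling is reduced to an
    // early return of VK_NULL_HANDLE.
    VkCommandBuffer allocateAndBeginPrimaryCB(VkDevice device, VkCommandPool pool) {
        VkCommandBufferAllocateInfo allocInfo = {};
        allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
        allocInfo.pNext = nullptr;
        allocInfo.commandPool = pool;
        allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
        allocInfo.commandBufferCount = 1;

        VkCommandBuffer cmdBuffer = VK_NULL_HANDLE;
        if (vkAllocateCommandBuffers(device, &allocInfo, &cmdBuffer) != VK_SUCCESS) {
            return VK_NULL_HANDLE;
        }

        VkCommandBufferBeginInfo beginInfo = {};
        beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        beginInfo.pNext = nullptr;
        beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
        beginInfo.pInheritanceInfo = nullptr;  // only secondary buffers use this
        vkBeginCommandBuffer(cmdBuffer, &beginInfo);
        return cmdBuffer;
    }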
457 if (!abandoningBuffer) {
473 bool forSecondaryCB) {
488 beginInfo.pNext = nullptr;
524 std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
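Lines 473-524 belong to beginRenderPass() and executeCommands(). The sketch below shows, under the assumption that the render pass, framebuffer, clear values, and secondary buffer handle already exist, how a primary buffer opens a render pass whose contents come from a secondary command buffer:

    #include <vulkan/vulkan.h>

    // Sketch: begin a render pass that will be filled by secondary command
    // buffers, execute one secondary buffer, then end the pass.
    void recordPassWithSecondary(VkCommandBuffer primaryCB,
                                 VkCommandBuffer secondaryCB,
                                 VkRenderPass renderPass,
                                 VkFramebuffer framebuffer,
                                 VkRect2D renderArea,
                                 const VkClearValue* clearValues,
                                 uint32_t clearValueCount) {
        VkRenderPassBeginInfo beginInfo = {};
        beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
        beginInfo.pNext = nullptr;
        beginInfo.renderPass = renderPass;
        beginInfo.framebuffer = framebuffer;
        beginInfo.renderArea = renderArea;
        beginInfo.clearValueCount = clearValueCount;
        beginInfo.pClearValues = clearValues;

        // SECONDARY_COMMAND_BUFFERS: the pass contents come from executed
        // secondary buffers rather than inline commands.
        vkCmdBeginRenderPass(primaryCB, &beginInfo,
                             VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
        vkCmdExecuteCommands(primaryCB, 1, &secondaryCB);
        vkCmdEndRenderPass(primaryCB);
    }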
546 const VkSemaphore* waitSemaphores,
548 uint32_t commandBufferCount,
549 const VkCommandBuffer* commandBuffers,
550 uint32_t signalCount,
551 const VkSemaphore* signalSemaphores,
557 protectedSubmitInfo.pNext = nullptr;
601 int signalCount = signalSemaphores.size();
602 int waitCount = waitSemaphores.size();
604 bool submitted = false;
606 if (0 == signalCount && 0 == waitCount) {
610 gpu, queue, fSubmitFence, 0, nullptr, nullptr, 1, &fCmdBuffer, 0, nullptr,
614 for (int i = 0; i < signalCount; ++i) {
615 if (signalSemaphores[i]->shouldSignal()) {
617 vkSignalSems.push_back(signalSemaphores[i]->semaphore());
623 for (int i = 0; i < waitCount; ++i) {
624 if (waitSemaphores[i]->shouldWait()) {
626 vkWaitSems.push_back(waitSemaphores[i]->semaphore());
637 vkSignalSems.size(), vkSignalSems.begin(),
640 for (int i = 0; i < signalCount; ++i) {
641 signalSemaphores[i]->markAsSignaled();
643 for (int i = 0; i < waitCount; ++i) {
644 waitSemaphores[i]->markAsWaited();
682 SkDebugf("Error getting fence status: %d\n", err);
683 SK_ABORT("Got an invalid fence status");
689 fFinishedProcs.push_back(std::move(finishedProc));
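Lines 557-689 come from submitToQueue(): the wait/signal semaphore handles are gathered, the buffer is handed to the static submit_to_queue() helper (its full signature appears in the reference list below), and completion is later checked through the fence. A reduced sketch of the vkQueueSubmit call that helper wraps; the single-command-buffer shape here is an illustrative simplification:

    #include <vulkan/vulkan.h>

    // Sketch: submit one command buffer, waiting on 'waitSems' and signaling
    // 'signalSems', and associate 'fence' with the submission so completion
    // can later be polled with vkGetFenceStatus / vkWaitForFences.
    VkResult submitOne(VkQueue queue, VkCommandBuffer cmdBuffer, VkFence fence,
                       uint32_t waitCount, const VkSemaphore* waitSems,
                       const VkPipelineStageFlags* waitStages,
                       uint32_t signalCount, const VkSemaphore* signalSems) {
        VkSubmitInfo submitInfo = {};
        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submitInfo.pNext = nullptr;  // the Skia code chains a VkProtectedSubmitInfo
                                     // here for protected contexts (see line 557)
        submitInfo.waitSemaphoreCount = waitCount;
        submitInfo.pWaitSemaphores = waitSems;
        submitInfo.pWaitDstStageMask = waitStages;
        submitInfo.commandBufferCount = 1;
        submitInfo.pCommandBuffers = &cmdBuffer;
        submitInfo.signalSemaphoreCount = signalCount;
        submitInfo.pSignalSemaphores = signalSems;
        return vkQueueSubmit(queue, 1, &submitInfo, fence);
    }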
692 void GrVkPrimaryCommandBuffer::onReleaseResources() {
693 for (int i = 0; i < fSecondaryCommandBuffers.size(); ++i) {
694 fSecondaryCommandBuffers[i]->releaseResources();
700 for (int i = 0; i < fSecondaryCommandBuffers.size(); ++i) {
701 fSecondaryCommandBuffers[i].release()->recycle(cmdPool);
703 fSecondaryCommandBuffers.clear();
711 uint32_t copyRegionCount,
734 uint32_t blitRegionCount,
755 uint32_t blitRegionCount,
775 uint32_t copyRegionCount,
795 uint32_t copyRegionCount,
832 uint32_t regionCount,
838 for (uint32_t i = 0; i < regionCount; ++i) {
875 fCmdBuffer, dstBuffer->vkBuffer(), dstOffset, dataSize, (const uint32_t*) data));
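Line 875 is the argument tail of the vkCmdUpdateBuffer call inside updateBuffer(). Sketched as a stand-alone helper, with the alignment and size limits the Vulkan spec places on this command noted in comments:

    #include <vulkan/vulkan.h>

    // Sketch: inline-update a small buffer region from CPU data.
    // vkCmdUpdateBuffer requires dstOffset and dataSize to be multiples of 4
    // and dataSize to be at most 65536 bytes; larger uploads normally go
    // through a staging-buffer copy instead.
    void recordSmallBufferUpdate(VkCommandBuffer cmdBuffer, VkBuffer dstBuffer,
                                 VkDeviceSize dstOffset, VkDeviceSize dataSize,
                                 const void* data) {
        vkCmdUpdateBuffer(cmdBuffer, dstBuffer, dstOffset, dataSize,
                          static_cast<const uint32_t*>(data));
    }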
882 uint32_t subRangeCount,
890 image->currentLayout(),
899 uint32_t subRangeCount,
907 image->currentLayout(),
916 uint32_t regionCount,
934 void GrVkPrimaryCommandBuffer::onFreeGPUData(const GrVkGpu* gpu) const {
958 VkCommandBuffer cmdBuffer;
968 VkCommandBuffer cmdBuffer, const GrVkRenderPass* externalRenderPass) {
982 inheritanceInfo.pNext = nullptr;
993 cmdBufferBeginInfo.pNext = nullptr;
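The final fragments (lines 958-993) come from GrVkSecondaryCommandBuffer::Create() and begin(), where the inheritance info ties the secondary buffer to a compatible render pass. A sketch of that begin sequence, assuming the render pass and (optional) framebuffer are already known; the subpass index and usage flags are illustrative:

    #include <vulkan/vulkan.h>

    // Sketch: begin a secondary command buffer that will execute entirely
    // inside a render pass of the primary buffer (RENDER_PASS_CONTINUE).
    void beginSecondaryCB(VkCommandBuffer secondaryCB,
                          VkRenderPass compatibleRenderPass,
                          VkFramebuffer framebuffer /* may be VK_NULL_HANDLE */) {
        VkCommandBufferInheritanceInfo inheritanceInfo = {};
        inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
        inheritanceInfo.pNext = nullptr;
        inheritanceInfo.renderPass = compatibleRenderPass;
        inheritanceInfo.subpass = 0;
        inheritanceInfo.framebuffer = framebuffer;
        inheritanceInfo.occlusionQueryEnable = VK_FALSE;
        inheritanceInfo.queryFlags = 0;
        inheritanceInfo.pipelineStatistics = 0;

        VkCommandBufferBeginInfo beginInfo = {};
        beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        beginInfo.pNext = nullptr;
        beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT |
                          VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
        beginInfo.pInheritanceInfo = &inheritanceInfo;
        vkBeginCommandBuffer(secondaryCB, &beginInfo);
    }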
using GrProtected = skgpu::Protected
static bool submit_to_queue(GrVkGpu *gpu, VkQueue queue, VkFence fence, uint32_t waitCount, const VkSemaphore *waitSemaphores, const VkPipelineStageFlags *waitStages, uint32_t commandBufferCount, const VkCommandBuffer *commandBuffers, uint32_t signalCount, const VkSemaphore *signalSemaphores, GrProtected protectedContext)
#define GR_VK_CALL(IFACE, X)
#define GR_VK_CALL_RESULT_NOCHECK(GPU, RESULT, X)
#define GR_VK_CALL_RESULT(GPU, RESULT, X)
#define GR_VK_CALL_ERRCHECK(GPU, X)
#define SK_ABORT(message,...)
void SK_SPI SkDebugf(const char format[], ...) SK_PRINTF_LIKE(1, 2)
sk_sp< T > sk_ref_sp(T *obj)
static constexpr bool SkToBool(const T &x)
virtual bool isCpuBuffer() const =0
size_t size() const final
VkBuffer vkBuffer() const
bool mustInvalidatePrimaryCmdBufferStateAfterClearAttachments() const
TrackedResourceArray< sk_sp< const GrManagedResource > > fTrackedResources
float fCachedBlendConstant[4]
void pipelineBarrier(const GrVkGpu *gpu, const GrManagedResource *resource, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, bool byRegion, BarrierType barrierType, void *barrier)
skia_private::STArray< 16, sk_sp< const GrBuffer > > fTrackedGpuBuffers
virtual void onFreeGPUData(const GrVkGpu *gpu) const =0
void setBlendConstants(const GrVkGpu *gpu, const float blendConstants[4])
void setViewport(const GrVkGpu *gpu, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *viewports)
VkCommandBuffer fCmdBuffer
void bindPipeline(const GrVkGpu *gpu, sk_sp< const GrVkPipeline > pipeline)
void addResource(sk_sp< const GrManagedResource > resource)
void bindIndexBuffer(GrVkGpu *gpu, sk_sp< const GrBuffer > buffer)
void addingWork(const GrVkGpu *gpu)
virtual void onReleaseResources()
void drawIndexed(const GrVkGpu *gpu, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance)
static constexpr uint32_t kMaxInputBuffers
VkPipelineStageFlags fDstStageMask
void submitPipelineBarriers(const GrVkGpu *gpu, bool forSelfDependency=false)
void clearAttachments(const GrVkGpu *gpu, int numAttachments, const VkClearAttachment *attachments, int numRects, const VkClearRect *clearRects)
void setScissor(const GrVkGpu *gpu, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *scissors)
skia_private::STArray< 2, VkImageMemoryBarrier > fImageBarriers
void addGrSurface(sk_sp< const GrSurface > surface)
VkViewport fCachedViewport
skia_private::STArray< 16, gr_cb< const GrSurface > > fTrackedGpuSurfaces
VkBuffer fBoundIndexBuffer
void addGrBuffer(sk_sp< const GrBuffer > buffer)
void bindInputBuffer(GrVkGpu *gpu, uint32_t binding, sk_sp< const GrBuffer > buffer)
skia_private::STArray< 1, VkBufferMemoryBarrier > fBufferBarriers
const GrVkRenderPass * fActiveRenderPass
void drawIndirect(const GrVkGpu *gpu, sk_sp< const GrBuffer > indirectBuffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
void pushConstants(const GrVkGpu *gpu, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void *values)
VkPipelineStageFlags fSrcStageMask
@ kBufferMemory_BarrierType
@ kImageMemory_BarrierType
void freeGPUData(const GrGpu *gpu, VkCommandPool pool) const
VkBuffer fBoundInputBuffers[kMaxInputBuffers]
void bindDescriptorSets(const GrVkGpu *gpu, VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *descriptorSets, uint32_t dynamicOffsetCount, const uint32_t *dynamicOffsets)
TrackedResourceArray< gr_rp< const GrRecycledResource > > fTrackedRecycledResources
void draw(const GrVkGpu *gpu, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance)
void drawIndexedIndirect(const GrVkGpu *gpu, sk_sp< const GrBuffer > indirectBuffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
void recycleSecondaryCommandBuffer(GrVkSecondaryCommandBuffer *buffer)
VkCommandPool vkCommandPool() const
VkFramebuffer framebuffer() const
const GrVkCaps & vkCaps() const
const skgpu::VulkanInterface * vkInterface() const
bool protectedContext() const
VkImageLayout currentLayout() const
const Resource * resource() const
VkPipeline pipeline() const
void endRenderPass(const GrVkGpu *gpu)
bool submitToQueue(GrVkGpu *gpu, VkQueue queue, skia_private::TArray< GrVkSemaphore::Resource * > &signalSemaphores, skia_private::TArray< GrVkSemaphore::Resource * > &waitSemaphores)
void forceSync(GrVkGpu *gpu)
void recycleSecondaryCommandBuffers(GrVkCommandPool *cmdPool)
void clearColorImage(const GrVkGpu *gpu, GrVkImage *image, const VkClearColorValue *color, uint32_t subRangeCount, const VkImageSubresourceRange *subRanges)
void blitImage(const GrVkGpu *gpu, const GrManagedResource *srcResource, VkImage srcImage, VkImageLayout srcLayout, const GrManagedResource *dstResource, VkImage dstImage, VkImageLayout dstLayout, uint32_t blitRegionCount, const VkImageBlit *blitRegions, VkFilter filter)
void copyImageToBuffer(const GrVkGpu *gpu, GrVkImage *srcImage, VkImageLayout srcLayout, sk_sp< GrGpuBuffer > dstBuffer, uint32_t copyRegionCount, const VkBufferImageCopy *copyRegions)
void end(GrVkGpu *gpu, bool abandoningBuffer=false)
void resolveImage(GrVkGpu *gpu, const GrVkImage &srcImage, const GrVkImage &dstImage, uint32_t regionCount, const VkImageResolve *regions)
bool finished(GrVkGpu *gpu)
bool beginRenderPass(GrVkGpu *gpu, const GrVkRenderPass *, sk_sp< const GrVkFramebuffer >, const VkClearValue clearValues[], const GrSurface *target, const SkIRect &bounds, bool forSecondaryCB)
void copyBuffer(GrVkGpu *gpu, sk_sp< GrGpuBuffer > srcBuffer, sk_sp< GrGpuBuffer > dstBuffer, uint32_t regionCount, const VkBufferCopy *regions)
void copyImage(const GrVkGpu *gpu, GrVkImage *srcImage, VkImageLayout srcLayout, GrVkImage *dstImage, VkImageLayout dstLayout, uint32_t copyRegionCount, const VkImageCopy *copyRegions)
~GrVkPrimaryCommandBuffer() override
void addFinishedProc(sk_sp< skgpu::RefCntedCallback > finishedProc)
void clearDepthStencilImage(const GrVkGpu *gpu, GrVkImage *image, const VkClearDepthStencilValue *color, uint32_t subRangeCount, const VkImageSubresourceRange *subRanges)
void executeCommands(const GrVkGpu *gpu, std::unique_ptr< GrVkSecondaryCommandBuffer > secondaryBuffer)
void nexSubpass(GrVkGpu *gpu, bool forSecondaryCB)
void fillBuffer(GrVkGpu *gpu, sk_sp< GrGpuBuffer >, VkDeviceSize offset, VkDeviceSize size, uint32_t data)
void copyBufferToImage(const GrVkGpu *gpu, VkBuffer srcBuffer, GrVkImage *dstImage, VkImageLayout dstLayout, uint32_t copyRegionCount, const VkBufferImageCopy *copyRegions)
static GrVkPrimaryCommandBuffer * Create(GrVkGpu *gpu, VkCommandPool cmdPool)
void updateBuffer(GrVkGpu *gpu, sk_sp< GrVkBuffer > dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *data)
bool colorAttachmentIndex(uint32_t *index) const
bool isCompatible(GrVkRenderTarget *target, SelfDependencyFlags selfDepFlags, LoadFromResolve) const
uint32_t clearValueCount() const
VkRenderPass vkRenderPass() const
static GrVkSecondaryCommandBuffer * Create(GrVkGpu *gpu, GrVkCommandPool *cmdPool)
void recycle(GrVkCommandPool *cmdPool)
void begin(GrVkGpu *gpu, const GrVkFramebuffer *framebuffer, const GrVkRenderPass *compatibleRenderPass)
const VkCommandBufferInheritanceInfo * pInheritanceInfo
VkCommandBufferUsageFlags flags
VkQueryControlFlags queryFlags
VkBool32 occlusionQueryEnable
VkFramebuffer framebuffer
VkQueryPipelineStatisticFlags pipelineStatistics
uint32_t dstQueueFamilyIndex
VkImageSubresourceRange subresourceRange
uint32_t srcQueueFamilyIndex
VkImageAspectFlags aspectMask
const VkClearValue * pClearValues
VkFramebuffer framebuffer
uint32_t waitSemaphoreCount
const VkPipelineStageFlags * pWaitDstStageMask
uint32_t commandBufferCount
const VkSemaphore * pWaitSemaphores
uint32_t signalSemaphoreCount
const VkCommandBuffer * pCommandBuffers
const VkSemaphore * pSignalSemaphores
#define TRACE_EVENT0(category_group, name)
VkFlags VkPipelineStageFlags
@ VK_COMMAND_BUFFER_LEVEL_PRIMARY
@ VK_COMMAND_BUFFER_LEVEL_SECONDARY
@ VK_DEPENDENCY_BY_REGION_BIT
VkFlags VkDependencyFlags
@ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT
@ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT
@ VK_PIPELINE_BIND_POINT_GRAPHICS
@ VK_IMAGE_ASPECT_COLOR_BIT
VkFlags VkShaderStageFlags
@ VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
@ VK_SUBPASS_CONTENTS_INLINE
@ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
@ VK_PIPELINE_STAGE_TRANSFER_BIT
#define VK_QUEUE_FAMILY_IGNORED
@ VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO
@ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO
@ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO
@ VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO
@ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
@ VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
@ VK_STRUCTURE_TYPE_SUBMIT_INFO