12#include "vulkan/vulkan_enums.hpp"
13#include "vulkan/vulkan_structs.hpp"
// Records a vk::ImageMemoryBarrier on `cmd` transitioning `mip_level_count`
// color-aspect mip levels (starting at `base_mip_level`) of `image` from
// `old_layout` to `new_layout`, guarded by the given access masks and
// pipeline stages. NOTE(review): the opening signature line (and the body
// of the old_layout == new_layout early-out) fall outside this chunk.
19    vk::AccessFlags src_access_mask,
20    vk::AccessFlags dst_access_mask,
21    vk::ImageLayout old_layout,
22    vk::ImageLayout new_layout,
25    uint32_t base_mip_level,
26    uint32_t mip_level_count = 1u) {
  // No-op when the layout is already correct; the early return sits in the
  // elided lines 28-30 of the original file.
27  if (old_layout == new_layout) {
31  vk::ImageMemoryBarrier barrier;
32  barrier.srcAccessMask = src_access_mask;
33  barrier.dstAccessMask = dst_access_mask;
34  barrier.oldLayout = old_layout;
35  barrier.newLayout = new_layout;
38  barrier.image =
image;
  // Barrier always covers the color aspect, a single array layer, and the
  // requested mip range.
39  barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
40  barrier.subresourceRange.baseMipLevel = base_mip_level;
41  barrier.subresourceRange.levelCount = mip_level_count;
42  barrier.subresourceRange.baseArrayLayer = 0u;
43  barrier.subresourceRange.layerCount = 1u;
  // No memory or buffer barriers — only the single image barrier above.
45  cmd.pipelineBarrier(src_stage, dst_stage, {},
nullptr,
nullptr, barrier);
// Takes shared ownership of the command buffer that all subsequent blit
// commands will be encoded into.
48BlitPassVK::BlitPassVK(std::shared_ptr<CommandBufferVK> command_buffer)
49    : command_buffer_(
std::move(command_buffer)) {}
// Default destructor; the shared command buffer is released via RAII.
51BlitPassVK::~BlitPassVK() =
default;
// Stores the debug label for this pass. NOTE(review): the original file has
// additional lines (54-56) between the signature and the assignment —
// presumably an empty-label guard; confirm against the full source.
53void BlitPassVK::OnSetLabel(std::string label) {
57  label_ = std::move(label);
// Validity check for the pass. The body (original lines 62-65) is outside
// this chunk, so the exact predicate cannot be documented here.
61bool BlitPassVK::IsValid()
const {
// Encodes the accumulated blit commands. The body (original lines 68-70) is
// outside this chunk; the transients allocator parameter is unused in the
// visible portion.
66bool BlitPassVK::EncodeCommands(
67    const std::shared_ptr<Allocator>& transients_allocator)
const {
// Copies `source_region` of `source` into `destination` at
// `destination_origin` via vkCmdCopyImage, transitioning both textures into
// the transfer layouts first. Returns false if either layout transition
// fails. NOTE(review): several original lines are elided in this chunk
// (e.g. 75-77, 111-114, 133-139), including the failure return inside the
// SetLayout check and the tail of the copyImage call.
72bool BlitPassVK::OnCopyTextureToTextureCommand(
73    std::shared_ptr<Texture>
source,
74    std::shared_ptr<Texture> destination,
78  auto&
encoder = *command_buffer_->GetEncoder();
79  const auto& cmd_buffer =
encoder.GetCommandBuffer();
81  const auto&
src = TextureVK::Cast(*
source);
82  const auto&
dst = TextureVK::Cast(*destination);
  // Source: wait on any prior transfer/shader/color-attachment writes, then
  // move to TRANSFER_SRC_OPTIMAL for reading by the copy.
88  BarrierVK src_barrier;
89  src_barrier.cmd_buffer = cmd_buffer;
90  src_barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
91  src_barrier.src_access = vk::AccessFlagBits::eTransferWrite |
92                           vk::AccessFlagBits::eShaderWrite |
93                           vk::AccessFlagBits::eColorAttachmentWrite;
94  src_barrier.src_stage = vk::PipelineStageFlagBits::eTransfer |
95                          vk::PipelineStageFlagBits::eFragmentShader |
96                          vk::PipelineStageFlagBits::eColorAttachmentOutput;
97  src_barrier.dst_access = vk::AccessFlagBits::eTransferRead;
98  src_barrier.dst_stage = vk::PipelineStageFlagBits::eTransfer;
  // Destination: no prior access to wait on (TopOfPipe / empty src access);
  // move to TRANSFER_DST_OPTIMAL for writing by the copy.
100  BarrierVK dst_barrier;
101  dst_barrier.cmd_buffer = cmd_buffer;
102  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
103  dst_barrier.src_access = {};
104  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
105  dst_barrier.dst_access =
106      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
107  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
108                          vk::PipelineStageFlagBits::eTransfer;
110  if (!
src.SetLayout(src_barrier) || !
dst.SetLayout(dst_barrier)) {
  // Copy a single color mip/layer; offsets come from the requested region
  // and destination origin.
115  vk::ImageCopy image_copy;
117  image_copy.setSrcSubresource(
118      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
119  image_copy.setDstSubresource(
120      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
122  image_copy.srcOffset =
123      vk::Offset3D(source_region.GetX(), source_region.GetY(), 0);
124  image_copy.dstOffset =
125      vk::Offset3D(destination_origin.x, destination_origin.y, 0);
127      vk::Extent3D(source_region.GetWidth(), source_region.GetHeight(), 1);
131  cmd_buffer.copyImage(
src.GetImage(),
132                       src_barrier.new_layout,
134                       dst_barrier.new_layout,
  // Swapchain destinations are transitioned to SHADER_READ_ONLY_OPTIMAL
  // after the copy. NOTE(review): `barrier` is declared in elided lines
  // (141-144); confirm its type (BarrierVK) against the full source.
140  if (
dst.IsSwapchainImage()) {
145    barrier.cmd_buffer = cmd_buffer;
146    barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
147    barrier.src_access = {};
148    barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
149    barrier.dst_access = vk::AccessFlagBits::eShaderRead;
150    barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;
152    return dst.SetLayout(barrier);
// Copies `source_region` of `source` into `destination` (a device buffer) at
// `destination_offset` via vkCmdCopyImageToBuffer. For readback buffers a
// transfer→host memory barrier is appended so the CPU can observe the data.
// NOTE(review): several original lines are elided here (159-161, 164-172,
// 199-210), including the BarrierVK declaration before line 173 and the
// tail of the copyImageToBuffer call.
156bool BlitPassVK::OnCopyTextureToBufferCommand(
157    std::shared_ptr<Texture>
source,
158    std::shared_ptr<DeviceBuffer> destination,
160    size_t destination_offset,
162  auto&
encoder = *command_buffer_->GetEncoder();
163  const auto& cmd_buffer =
encoder.GetCommandBuffer();
166  const auto&
src = TextureVK::Cast(*
source);
  // Source: wait on prior shader/transfer/attachment writes, then move to
  // TRANSFER_SRC_OPTIMAL for the buffer copy.
173  barrier.cmd_buffer = cmd_buffer;
174  barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
175  barrier.src_access = vk::AccessFlagBits::eShaderWrite |
176                       vk::AccessFlagBits::eTransferWrite |
177                       vk::AccessFlagBits::eColorAttachmentWrite;
178  barrier.src_stage = vk::PipelineStageFlagBits::eFragmentShader |
179                      vk::PipelineStageFlagBits::eTransfer |
180                      vk::PipelineStageFlagBits::eColorAttachmentOutput;
181  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
182  barrier.dst_stage = vk::PipelineStageFlagBits::eVertexShader |
183                      vk::PipelineStageFlagBits::eFragmentShader;
185  const auto&
dst = DeviceBufferVK::Cast(*destination);
  // Row length / image height of 0 mean tightly-packed rows in the buffer.
187  vk::BufferImageCopy image_copy;
188  image_copy.setBufferOffset(destination_offset);
189  image_copy.setBufferRowLength(0);
190  image_copy.setBufferImageHeight(0);
191  image_copy.setImageSubresource(
192      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
193  image_copy.setImageOffset(
194      vk::Offset3D(source_region.GetX(), source_region.GetY(), 0));
195  image_copy.setImageExtent(
196      vk::Extent3D(source_region.GetWidth(), source_region.GetHeight(), 1));
198  if (!
src.SetLayout(barrier)) {
203  cmd_buffer.copyImageToBuffer(
src.GetImage(),
  // Make transfer writes visible to host reads when the buffer is used for
  // CPU readback (this `barrier` shadows the BarrierVK above).
211  if (destination->GetDeviceBufferDescriptor().readback) {
212    vk::MemoryBarrier barrier;
213    barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
214    barrier.dstAccessMask = vk::AccessFlagBits::eHostRead;
216    cmd_buffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
217                               vk::PipelineStageFlagBits::eHost, {}, 1,
218                               &barrier, 0, {}, 0, {});
// Transitions `texture` from a transfer-write state to
// SHADER_READ_ONLY_OPTIMAL so fragment shaders can sample it. Returns the
// result of the layout transition. NOTE(review): the BarrierVK declaration
// before line 230 and lines 228-229/235/237-243 are elided in this chunk.
224bool BlitPassVK::ConvertTextureToShaderRead(
225    const std::shared_ptr<Texture>&
texture) {
226  auto&
encoder = *command_buffer_->GetEncoder();
227  const auto& cmd_buffer =
encoder.GetCommandBuffer();
  // Wait for outstanding transfer writes, then expose the image to fragment
  // shader reads.
230  barrier.cmd_buffer = cmd_buffer;
231  barrier.src_access = vk::AccessFlagBits::eTransferWrite;
232  barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
233  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
234  barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;
236  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
238  const auto& texture_vk = TextureVK::Cast(*
texture);
244  return texture_vk.SetLayout(barrier);
// Uploads `source` (a buffer view, original line 249 elided) into
// `destination_region` of `destination` via vkCmdCopyBufferToImage, and
// optionally converts the texture to SHADER_READ_ONLY_OPTIMAL afterwards
// when `convert_to_read` is set. NOTE(review): multiple original lines are
// elided (249, 252-253, 288-296, 298-303, 305, 311-322), including the
// failure returns and the second BarrierVK declaration before line 306.
248bool BlitPassVK::OnCopyBufferToTextureCommand(
250    std::shared_ptr<Texture> destination,
251    IRect destination_region,
254    bool convert_to_read) {
255  auto&
encoder = *command_buffer_->GetEncoder();
256  const auto& cmd_buffer =
encoder.GetCommandBuffer();
259  const auto&
dst = TextureVK::Cast(*destination);
260  const auto&
src = DeviceBufferVK::Cast(*
source.buffer);
  // Destination: nothing to wait on; move to TRANSFER_DST_OPTIMAL for the
  // upload, with later visibility to fragment-shader reads and transfers.
266  BarrierVK dst_barrier;
267  dst_barrier.cmd_buffer = cmd_buffer;
268  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
269  dst_barrier.src_access = {};
270  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
271  dst_barrier.dst_access =
272      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
273  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
274                          vk::PipelineStageFlagBits::eTransfer;
  // Tightly-packed source rows starting at the view's offset; single color
  // mip/layer at the requested destination rectangle.
276  vk::BufferImageCopy image_copy;
277  image_copy.setBufferOffset(
source.range.offset);
278  image_copy.setBufferRowLength(0);
279  image_copy.setBufferImageHeight(0);
280  image_copy.setImageSubresource(
281      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
282  image_copy.imageOffset.x = destination_region.GetX();
283  image_copy.imageOffset.y = destination_region.GetY();
284  image_copy.imageOffset.z = 0u;
285  image_copy.imageExtent.width = destination_region.GetWidth();
286  image_copy.imageExtent.height = destination_region.GetHeight();
287  image_copy.imageExtent.depth = 1u;
292  if (!
dst.SetLayout(dst_barrier)) {
297  cmd_buffer.copyBufferToImage(
src.GetBuffer(),
299                               dst_barrier.new_layout,
  // Optionally hand the texture to fragment shaders after the upload.
304  if (convert_to_read) {
306    barrier.cmd_buffer = cmd_buffer;
307    barrier.src_access = vk::AccessFlagBits::eTransferWrite;
308    barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
309    barrier.dst_access = vk::AccessFlagBits::eShaderRead;
310    barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;
312    barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
314    if (!
dst.SetLayout(barrier)) {
// Generates the full mip chain for `texture` with successive vkCmdBlitImage
// downsamples: each level N-1 is transitioned TRANSFER_DST → TRANSFER_SRC,
// blitted into level N, then moved to SHADER_READ_ONLY_OPTIMAL. The final
// level and the texture's cached layout state are updated at the end.
// NOTE(review): many original lines are elided in this chunk (324, 326-327,
// 330-349, 353, 358-360, 362-364, 369-372, 379-383, 386-388, 393,
// 398-399, 403-407, 411-420, 425-428, 431-433, 439, 442-445), including
// the declarations of `cmd`, `image`, `blit`, `width`, and `height`, the
// per-level size halving, and the blitImage call itself.
323bool BlitPassVK::OnGenerateMipmapCommand(std::shared_ptr<Texture>
texture,
325  auto&
encoder = *command_buffer_->GetEncoder();
328  const auto size =
src.GetTextureDescriptor().size;
329  uint32_t mip_count =
src.GetTextureDescriptor().mip_count;
  // Nothing to generate with fewer than two mip levels.
331  if (mip_count < 2u) {
  // Initial whole-image transition into TRANSFER_DST_OPTIMAL (arguments to
  // InsertImageMemoryBarrier; the call head is in elided lines).
350                           vk::AccessFlagBits::eTransferWrite |
351                               vk::AccessFlagBits::eColorAttachmentWrite,
352                           vk::AccessFlagBits::eTransferRead,
354                           vk::ImageLayout::eTransferDstOptimal,
355                           vk::PipelineStageFlagBits::eTransfer |
356                               vk::PipelineStageFlagBits::eColorAttachmentOutput,
357                           vk::PipelineStageFlagBits::eTransfer,
  // Reusable single-level barrier; baseMipLevel is rewritten per iteration.
361  vk::ImageMemoryBarrier barrier;
362  barrier.image =
image;
365  barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
366  barrier.subresourceRange.baseArrayLayer = 0;
367  barrier.subresourceRange.layerCount = 1;
368  barrier.subresourceRange.levelCount = 1;
373  for (
size_t mip_level = 1u; mip_level < mip_count; mip_level++) {
    // Make level (mip_level - 1) readable as the blit source.
374    barrier.subresourceRange.baseMipLevel = mip_level - 1;
375    barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
376    barrier.newLayout = vk::ImageLayout::eTransferSrcOptimal;
377    barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
378    barrier.dstAccessMask = vk::AccessFlagBits::eTransferRead;
384    cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
385                        vk::PipelineStageFlagBits::eTransfer, {}, {}, {},
    // Blit level (mip_level - 1) down into level mip_level; extents are
    // clamped to at least 1 texel per axis.
389    blit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
390    blit.srcSubresource.baseArrayLayer = 0u;
391    blit.srcSubresource.layerCount = 1u;
392    blit.srcSubresource.mipLevel = mip_level - 1;
394    blit.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
395    blit.dstSubresource.baseArrayLayer = 0u;
396    blit.dstSubresource.layerCount = 1u;
397    blit.dstSubresource.mipLevel = mip_level;
400    blit.srcOffsets[1].x = std::max<int32_t>(
width, 1u);
401    blit.srcOffsets[1].y = std::max<int32_t>(
height, 1u);
402    blit.srcOffsets[1].z = 1u;
408    blit.dstOffsets[1].x = std::max<int32_t>(
width, 1u);
409    blit.dstOffsets[1].y = std::max<int32_t>(
height, 1u);
410    blit.dstOffsets[1].z = 1u;
413                  vk::ImageLayout::eTransferSrcOptimal,
415                  vk::ImageLayout::eTransferDstOptimal,
    // The consumed source level is now final — hand it to fragment shaders.
421    barrier.oldLayout = vk::ImageLayout::eTransferSrcOptimal;
422    barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
423    barrier.srcAccessMask = vk::AccessFlagBits::eTransferRead;
424    barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
429    cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
430                        vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
  // The last level was only ever a blit destination; transition it
  // TRANSFER_DST → SHADER_READ_ONLY directly.
434  barrier.subresourceRange.baseMipLevel = mip_count - 1;
435  barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
436  barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
437  barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
438  barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
440  cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
441                      vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
  // Record the final layout without re-encoding a barrier, and mark the
  // mipmap chain as generated on the texture.
446  src.SetLayoutWithoutEncoding(vk::ImageLayout::eShaderReadOnlyOptimal);
447  src.SetMipMapGenerated();
sk_sp< const SkImage > image
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
static void InsertImageMemoryBarrier(const vk::CommandBuffer &cmd, const vk::Image &image, vk::AccessFlags src_access_mask, vk::AccessFlags dst_access_mask, vk::ImageLayout old_layout, vk::ImageLayout new_layout, vk::PipelineStageFlags src_stage, vk::PipelineStageFlags dst_stage, uint32_t base_mip_level, uint32_t mip_level_count=1u)
#define VK_QUEUE_FAMILY_IGNORED