19 const vk::Image&
image,
20 vk::AccessFlags src_access_mask,
21 vk::AccessFlags dst_access_mask,
22 vk::ImageLayout old_layout,
23 vk::ImageLayout new_layout,
24 vk::PipelineStageFlags src_stage,
25 vk::PipelineStageFlags dst_stage,
26 uint32_t base_mip_level,
27 uint32_t mip_level_count = 1u) {
28 if (old_layout == new_layout) {
32 vk::ImageMemoryBarrier barrier;
33 barrier.srcAccessMask = src_access_mask;
34 barrier.dstAccessMask = dst_access_mask;
35 barrier.oldLayout = old_layout;
36 barrier.newLayout = new_layout;
39 barrier.image =
image;
40 barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
41 barrier.subresourceRange.baseMipLevel = base_mip_level;
42 barrier.subresourceRange.levelCount = mip_level_count;
43 barrier.subresourceRange.baseArrayLayer = 0u;
44 barrier.subresourceRange.layerCount = 1u;
46 cmd.pipelineBarrier(src_stage, dst_stage, {},
nullptr,
nullptr, barrier);
64 const auto& cmd_buffer =
encoder.GetCommandBuffer();
75 src_barrier.
new_layout = vk::ImageLayout::eTransferSrcOptimal;
76 src_barrier.
src_access = vk::AccessFlagBits::eTransferWrite |
77 vk::AccessFlagBits::eShaderWrite |
78 vk::AccessFlagBits::eColorAttachmentWrite;
79 src_barrier.
src_stage = vk::PipelineStageFlagBits::eTransfer |
80 vk::PipelineStageFlagBits::eFragmentShader |
81 vk::PipelineStageFlagBits::eColorAttachmentOutput;
82 src_barrier.
dst_access = vk::AccessFlagBits::eTransferRead;
83 src_barrier.
dst_stage = vk::PipelineStageFlagBits::eTransfer;
87 dst_barrier.
new_layout = vk::ImageLayout::eTransferDstOptimal;
89 dst_barrier.
src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
91 vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
92 dst_barrier.
dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
93 vk::PipelineStageFlagBits::eTransfer;
95 if (!src.SetLayout(src_barrier) || !dst.SetLayout(dst_barrier)) {
100 vk::ImageCopy image_copy;
102 image_copy.setSrcSubresource(
103 vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
104 image_copy.setDstSubresource(
105 vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
107 image_copy.srcOffset =
109 image_copy.dstOffset =
116 cmd_buffer.copyImage(src.GetImage(),
125 if (dst.IsSwapchainImage()) {
131 barrier.
new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
133 barrier.
src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
134 barrier.
dst_access = vk::AccessFlagBits::eShaderRead;
135 barrier.
dst_stage = vk::PipelineStageFlagBits::eFragmentShader;
137 return dst.SetLayout(barrier);
151 const auto& cmd_buffer =
encoder.GetCommandBuffer();
162 barrier.
new_layout = vk::ImageLayout::eTransferSrcOptimal;
163 barrier.
src_access = vk::AccessFlagBits::eShaderWrite |
164 vk::AccessFlagBits::eTransferWrite |
165 vk::AccessFlagBits::eColorAttachmentWrite;
166 barrier.
src_stage = vk::PipelineStageFlagBits::eFragmentShader |
167 vk::PipelineStageFlagBits::eTransfer |
168 vk::PipelineStageFlagBits::eColorAttachmentOutput;
169 barrier.
dst_access = vk::AccessFlagBits::eShaderRead;
170 barrier.
dst_stage = vk::PipelineStageFlagBits::eVertexShader |
171 vk::PipelineStageFlagBits::eFragmentShader;
175 vk::BufferImageCopy image_copy;
177 image_copy.setBufferRowLength(0);
178 image_copy.setBufferImageHeight(0);
179 image_copy.setImageSubresource(
180 vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
181 image_copy.setImageOffset(
183 image_copy.setImageExtent(
186 if (!src.SetLayout(barrier)) {
191 cmd_buffer.copyImageToBuffer(src.GetImage(),
199 if (
destination->GetDeviceBufferDescriptor().readback) {
200 vk::MemoryBarrier barrier;
201 barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
202 barrier.dstAccessMask = vk::AccessFlagBits::eHostRead;
204 cmd_buffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
205 vk::PipelineStageFlagBits::eHost, {}, 1,
206 &barrier, 0, {}, 0, {});
223 const auto& cmd_buffer =
encoder.GetCommandBuffer();
235 dst_barrier.
new_layout = vk::ImageLayout::eTransferDstOptimal;
237 dst_barrier.
src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
239 vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
240 dst_barrier.
dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
241 vk::PipelineStageFlagBits::eTransfer;
243 vk::BufferImageCopy image_copy;
245 image_copy.setBufferRowLength(0);
246 image_copy.setBufferImageHeight(0);
247 image_copy.setImageSubresource(
248 vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
249 image_copy.setImageOffset(
251 image_copy.setImageExtent(vk::Extent3D(
destination->GetSize().width,
254 if (!dst.SetLayout(dst_barrier)) {
259 cmd_buffer.copyBufferToImage(src.GetBuffer(),
281 const auto size = src.GetTextureDescriptor().size;
282 uint32_t mip_count = src.GetTextureDescriptor().mip_count;
284 if (mip_count < 2u) {
288 const auto&
image = src.GetImage();
289 const auto& cmd =
encoder.GetCommandBuffer();
303 vk::AccessFlagBits::eTransferWrite,
304 vk::AccessFlagBits::eTransferRead,
306 vk::ImageLayout::eTransferDstOptimal,
307 vk::PipelineStageFlagBits::eTransfer,
308 vk::PipelineStageFlagBits::eTransfer,
313 vk::ImageMemoryBarrier barrier;
314 barrier.image =
image;
317 barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
318 barrier.subresourceRange.baseArrayLayer = 0;
319 barrier.subresourceRange.layerCount = 1;
320 barrier.subresourceRange.levelCount = 1;
323 size_t width = size.width;
324 size_t height = size.height;
325 for (
size_t mip_level = 1u; mip_level < mip_count; mip_level++) {
326 barrier.subresourceRange.baseMipLevel = mip_level - 1;
327 barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
328 barrier.newLayout = vk::ImageLayout::eTransferSrcOptimal;
329 barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
330 barrier.dstAccessMask = vk::AccessFlagBits::eTransferRead;
336 cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
337 vk::PipelineStageFlagBits::eTransfer, {}, {}, {},
341 blit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
342 blit.srcSubresource.baseArrayLayer = 0u;
343 blit.srcSubresource.layerCount = 1u;
344 blit.srcSubresource.mipLevel = mip_level - 1;
346 blit.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
347 blit.dstSubresource.baseArrayLayer = 0u;
348 blit.dstSubresource.layerCount = 1u;
349 blit.dstSubresource.mipLevel = mip_level;
352 blit.srcOffsets[1].x = std::max<int32_t>(
width, 1u);
353 blit.srcOffsets[1].y = std::max<int32_t>(
height, 1u);
354 blit.srcOffsets[1].z = 1u;
360 blit.dstOffsets[1].x = std::max<int32_t>(
width, 1u);
361 blit.dstOffsets[1].y = std::max<int32_t>(
height, 1u);
362 blit.dstOffsets[1].z = 1u;
365 vk::ImageLayout::eTransferSrcOptimal,
367 vk::ImageLayout::eTransferDstOptimal,
373 barrier.oldLayout = vk::ImageLayout::eTransferSrcOptimal;
374 barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
375 barrier.srcAccessMask = vk::AccessFlagBits::eTransferRead;
376 barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
381 cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
382 vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
386 barrier.subresourceRange.baseMipLevel = mip_count - 1;
387 barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
388 barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
389 barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
390 barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
392 cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
393 vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
398 src.SetLayoutWithoutEncoding(vk::ImageLayout::eShaderReadOnlyOptimal);
399 src.SetMipMapGenerated();
// Extraction artifact: outline index of the definitions above, kept for
// reference. Not compilable code.
//   static void InsertImageMemoryBarrier(const vk::CommandBuffer& cmd, const vk::Image& image, vk::AccessFlags src_access_mask, vk::AccessFlags dst_access_mask, vk::ImageLayout old_layout, vk::ImageLayout new_layout, vk::PipelineStageFlags src_stage, vk::PipelineStageFlags dst_stage, uint32_t base_mip_level, uint32_t mip_level_count = 1u)
//   bool Encode(CommandEncoderVK& encoder) const override  (x3)