#include "impeller/renderer/backend/vulkan/blit_pass_vk.h"

#include "impeller/renderer/backend/vulkan/barrier_vk.h"
#include "impeller/renderer/backend/vulkan/command_buffer_vk.h"
#include "impeller/renderer/backend/vulkan/device_buffer_vk.h"
#include "impeller/renderer/backend/vulkan/texture_vk.h"
#include "vulkan/vulkan_core.h"
#include "vulkan/vulkan_enums.hpp"
#include "vulkan/vulkan_structs.hpp"

namespace impeller {
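
// Records a pipeline barrier that transitions `mip_level_count` mip levels of
// `image` from `old_layout` to `new_layout`. A no-op when the layouts match.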
static void InsertImageMemoryBarrier(const vk::CommandBuffer& cmd,
                                     const vk::Image& image,
                                     vk::AccessFlags src_access_mask,
                                     vk::AccessFlags dst_access_mask,
                                     vk::ImageLayout old_layout,
                                     vk::ImageLayout new_layout,
                                     vk::PipelineStageFlags src_stage,
                                     vk::PipelineStageFlags dst_stage,
                                     uint32_t base_mip_level,
                                     uint32_t mip_level_count = 1u) {
  if (old_layout == new_layout) {
    return;
  }

  vk::ImageMemoryBarrier barrier;
  barrier.srcAccessMask = src_access_mask;
  barrier.dstAccessMask = dst_access_mask;
  barrier.oldLayout = old_layout;
  barrier.newLayout = new_layout;
  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.image = image;
  barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
  barrier.subresourceRange.baseMipLevel = base_mip_level;
  barrier.subresourceRange.levelCount = mip_level_count;
  barrier.subresourceRange.baseArrayLayer = 0u;
  barrier.subresourceRange.layerCount = 1u;

  cmd.pipelineBarrier(src_stage, dst_stage, {}, nullptr, nullptr, barrier);
}

BlitPassVK::BlitPassVK(std::shared_ptr<CommandBufferVK> command_buffer,
                       const WorkaroundsVK& workarounds)
    : command_buffer_(std::move(command_buffer)), workarounds_(workarounds) {}

BlitPassVK::~BlitPassVK() = default;

void BlitPassVK::OnSetLabel(std::string_view label) {}

bool BlitPassVK::IsValid() const {
  return true;
}

bool BlitPassVK::EncodeCommands() const {
  return true;
}

bool BlitPassVK::OnCopyTextureToTextureCommand(
    std::shared_ptr<Texture> source,
    std::shared_ptr<Texture> destination,
    IRect source_region,
    IPoint destination_origin,
    std::string_view label) {
  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();

  const auto& src = TextureVK::Cast(*source);
  const auto& dst = TextureVK::Cast(*destination);

  if (!command_buffer_->Track(source) ||
      !command_buffer_->Track(destination)) {
    return false;
  }
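
  // Transition the source to transfer-src, waiting on any pending transfer,
  // shader, or color attachment writes so the copy reads complete data.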
  BarrierVK src_barrier;
  src_barrier.cmd_buffer = cmd_buffer;
  src_barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
  src_barrier.src_access = vk::AccessFlagBits::eTransferWrite |
                           vk::AccessFlagBits::eShaderWrite |
                           vk::AccessFlagBits::eColorAttachmentWrite;
  src_barrier.src_stage = vk::PipelineStageFlagBits::eTransfer |
                          vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eColorAttachmentOutput;
  src_barrier.dst_access = vk::AccessFlagBits::eTransferRead;
  src_barrier.dst_stage = vk::PipelineStageFlagBits::eTransfer;
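
  // The destination is fully overwritten, so nothing needs to be waited on
  // before transitioning it to transfer-dst.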
  BarrierVK dst_barrier;
  dst_barrier.cmd_buffer = cmd_buffer;
  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
  dst_barrier.src_access = {};
  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  dst_barrier.dst_access =
      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eTransfer;

  if (!src.SetLayout(src_barrier) || !dst.SetLayout(dst_barrier)) {
    return false;
  }

  vk::ImageCopy image_copy;

  image_copy.setSrcSubresource(
      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
  image_copy.setDstSubresource(
      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));

  image_copy.srcOffset =
      vk::Offset3D(source_region.GetX(), source_region.GetY(), 0);
  image_copy.dstOffset =
      vk::Offset3D(destination_origin.x, destination_origin.y, 0);
  image_copy.extent =
      vk::Extent3D(source_region.GetWidth(), source_region.GetHeight(), 1);
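
  // Issue the copy now that both images are in transfer-optimal layouts.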
  cmd_buffer.copyImage(src.GetImage(), src_barrier.new_layout, dst.GetImage(),
                       dst_barrier.new_layout, image_copy);

  // If this is an onscreen texture, do not transition the layout back to
  // shader read.
  if (dst.IsSwapchainImage()) {
    return true;
  }

  BarrierVK barrier;
  barrier.cmd_buffer = cmd_buffer;
  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
  barrier.src_access = {};
  barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
  barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

  return dst.SetLayout(barrier);
}

bool BlitPassVK::OnCopyTextureToBufferCommand(
    std::shared_ptr<Texture> source,
    std::shared_ptr<DeviceBuffer> destination,
    IRect source_region,
    size_t destination_offset,
    std::string_view label) {
  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();

  const auto& src = TextureVK::Cast(*source);

  if (!command_buffer_->Track(source) ||
      !command_buffer_->Track(destination)) {
    return false;
  }
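
  // Transition the texture to transfer-src before reading it back into the
  // buffer.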
  BarrierVK barrier;
  barrier.cmd_buffer = cmd_buffer;
  barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
  barrier.src_access = vk::AccessFlagBits::eShaderWrite |
                       vk::AccessFlagBits::eTransferWrite |
                       vk::AccessFlagBits::eColorAttachmentWrite;
  barrier.src_stage = vk::PipelineStageFlagBits::eFragmentShader |
                      vk::PipelineStageFlagBits::eTransfer |
                      vk::PipelineStageFlagBits::eColorAttachmentOutput;
  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
  barrier.dst_stage = vk::PipelineStageFlagBits::eVertexShader |
                      vk::PipelineStageFlagBits::eFragmentShader;

  const auto& dst = DeviceBufferVK::Cast(*destination);

  vk::BufferImageCopy image_copy;
  image_copy.setBufferOffset(destination_offset);
  image_copy.setBufferRowLength(0);
  image_copy.setBufferImageHeight(0);
  image_copy.setImageSubresource(
      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
  image_copy.setImageOffset(
      vk::Offset3D(source_region.GetX(), source_region.GetY(), 0));
  image_copy.setImageExtent(
      vk::Extent3D(source_region.GetWidth(), source_region.GetHeight(), 1));

  if (!src.SetLayout(barrier)) {
    return false;
  }

  cmd_buffer.copyImageToBuffer(src.GetImage(), barrier.new_layout,
                               dst.GetBuffer(), image_copy);
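
  // If the buffer is used for readback, add a transfer-to-host barrier so
  // host reads wait for the transfer write to complete.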
  if (destination->GetDeviceBufferDescriptor().readback) {
    vk::MemoryBarrier barrier;
    barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
    barrier.dstAccessMask = vk::AccessFlagBits::eHostRead;

    cmd_buffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
                               vk::PipelineStageFlagBits::eHost, {}, 1,
                               &barrier, 0, {}, 0, {});
  }

  return true;
}

bool BlitPassVK::ConvertTextureToShaderRead(
    const std::shared_ptr<Texture>& texture) {
  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();
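
  // Make prior transfer writes visible to fragment shader reads and move the
  // image to the shader-read-only layout.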
  BarrierVK barrier;
  barrier.cmd_buffer = cmd_buffer;
  barrier.src_access = vk::AccessFlagBits::eTransferWrite;
  barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
  barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;

  const auto& texture_vk = TextureVK::Cast(*texture);

  if (!command_buffer_->Track(texture)) {
    return false;
  }

  return texture_vk.SetLayout(barrier);
}

bool BlitPassVK::OnCopyBufferToTextureCommand(
    BufferView source,
    std::shared_ptr<Texture> destination,
    IRect destination_region,
    std::string_view label,
    uint32_t mip_level,
    uint32_t slice,
    bool convert_to_read) {
  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();

  const auto& dst = TextureVK::Cast(*destination);
  const auto& src = DeviceBufferVK::Cast(*source.GetBuffer());

  std::shared_ptr<const DeviceBuffer> source_buffer = source.TakeBuffer();
  if ((source_buffer && !command_buffer_->Track(source_buffer)) ||
      !command_buffer_->Track(destination)) {
    return false;
  }
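
  // The destination contents are fully overwritten by the upload, so nothing
  // needs to be waited on before the transfer-dst transition.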
  BarrierVK dst_barrier;
  dst_barrier.cmd_buffer = cmd_buffer;
  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
  dst_barrier.src_access = {};
  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  dst_barrier.dst_access =
      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eTransfer;

  vk::BufferImageCopy image_copy;
  image_copy.setBufferOffset(source.GetRange().offset);
  image_copy.setBufferRowLength(0);
  image_copy.setBufferImageHeight(0);
  image_copy.setImageSubresource(vk::ImageSubresourceLayers(
      vk::ImageAspectFlagBits::eColor, mip_level, slice, 1));
  image_copy.imageOffset.x = destination_region.GetX();
  image_copy.imageOffset.y = destination_region.GetY();
  image_copy.imageOffset.z = 0u;
  image_copy.imageExtent.width = destination_region.GetWidth();
  image_copy.imageExtent.height = destination_region.GetHeight();
  image_copy.imageExtent.depth = 1u;

  if (!dst.SetLayout(dst_barrier)) {
    return false;
  }
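
  // Copy the staging buffer into the selected mip level and array slice.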
  cmd_buffer.copyBufferToImage(src.GetBuffer(), dst.GetImage(),
                               dst_barrier.new_layout, image_copy);

  // Transition to shader-read if the texture will be sampled right after the
  // upload.
  if (convert_to_read) {
    BarrierVK barrier;
    barrier.cmd_buffer = cmd_buffer;
    barrier.src_access = vk::AccessFlagBits::eTransferWrite;
    barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
    barrier.dst_access = vk::AccessFlagBits::eShaderRead;
    barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

    barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;

    if (!dst.SetLayout(barrier)) {
      return false;
    }
  }

  return true;
}

bool BlitPassVK::ResizeTexture(const std::shared_ptr<Texture>& source,
                               const std::shared_ptr<Texture>& destination) {
  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();

  const auto& src = TextureVK::Cast(*source);
  const auto& dst = TextureVK::Cast(*destination);

  if (!command_buffer_->Track(source) ||
      !command_buffer_->Track(destination)) {
    return false;
  }

  BarrierVK src_barrier;
  src_barrier.cmd_buffer = cmd_buffer;
  src_barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
  src_barrier.src_access = vk::AccessFlagBits::eTransferWrite |
                           vk::AccessFlagBits::eShaderWrite |
                           vk::AccessFlagBits::eColorAttachmentWrite;
  src_barrier.src_stage = vk::PipelineStageFlagBits::eTransfer |
                          vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eColorAttachmentOutput;
  src_barrier.dst_access = vk::AccessFlagBits::eTransferRead;
  src_barrier.dst_stage = vk::PipelineStageFlagBits::eTransfer;

  BarrierVK dst_barrier;
  dst_barrier.cmd_buffer = cmd_buffer;
  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
  dst_barrier.src_access = {};
  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  dst_barrier.dst_access =
      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eTransfer;

  if (!src.SetLayout(src_barrier) || !dst.SetLayout(dst_barrier)) {
    return false;
  }
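
  // Blit the full source extent onto the full destination extent and let the
  // filter handle the scaling.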
  vk::ImageBlit blit;
  blit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
  blit.srcSubresource.baseArrayLayer = 0u;
  blit.srcSubresource.layerCount = 1u;
  blit.srcSubresource.mipLevel = 0;

  blit.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
  blit.dstSubresource.baseArrayLayer = 0u;
  blit.dstSubresource.layerCount = 1u;
  blit.dstSubresource.mipLevel = 0;

  // offsets[0] is zero-initialized; offsets[1] is the exclusive extent.
  blit.srcOffsets[1].x = std::max<int32_t>(source->GetSize().width, 1u);
  blit.srcOffsets[1].y = std::max<int32_t>(source->GetSize().height, 1u);
  blit.srcOffsets[1].z = 1u;

  blit.dstOffsets[1].x = std::max<int32_t>(destination->GetSize().width, 1u);
  blit.dstOffsets[1].y = std::max<int32_t>(destination->GetSize().height, 1u);
  blit.dstOffsets[1].z = 1u;
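
  // Issue the scaled blit (linear filtering assumed).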
  cmd_buffer.blitImage(src.GetImage(), src_barrier.new_layout, dst.GetImage(),
                       dst_barrier.new_layout, blit, vk::Filter::eLinear);

  // Transition the destination to shader-read so it can be sampled.
  BarrierVK barrier;
  barrier.cmd_buffer = cmd_buffer;
  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
  barrier.src_access = {};
  barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
  barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

  return dst.SetLayout(barrier);
}

bool BlitPassVK::OnGenerateMipmapCommand(std::shared_ptr<Texture> texture,
                                         std::string_view label) {
  auto& src = TextureVK::Cast(*texture);

  const auto size = src.GetTextureDescriptor().size;
  uint32_t mip_count = src.GetTextureDescriptor().mip_count;

  // Nothing to do for single-level textures.
  if (mip_count < 2u) {
    return true;
  }

  const auto& image = src.GetImage();
  const auto& cmd = command_buffer_->GetCommandBuffer();

  if (!command_buffer_->Track(texture)) {
    return false;
  }

  // Move the entire mip chain to transfer-dst before the downsampling loop,
  // waiting on any prior transfer or color attachment writes. The old-layout
  // argument is assumed to be the texture's currently tracked layout, which
  // preserves the base level's contents.
  InsertImageMemoryBarrier(
      cmd, image,
      vk::AccessFlagBits::eTransferWrite |
          vk::AccessFlagBits::eColorAttachmentWrite,
      vk::AccessFlagBits::eTransferRead,
      src.GetLayout(),  // old layout (assumed)
      vk::ImageLayout::eTransferDstOptimal,
      vk::PipelineStageFlagBits::eTransfer |
          vk::PipelineStageFlagBits::eColorAttachmentOutput,
      vk::PipelineStageFlagBits::eTransfer,
      /*base_mip_level=*/0u,
      /*mip_level_count=*/mip_count);

  vk::ImageMemoryBarrier barrier;
  barrier.image = image;
  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
  barrier.subresourceRange.baseArrayLayer = 0;
  barrier.subresourceRange.layerCount = 1;
  barrier.subresourceRange.levelCount = 1;
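
  // Each iteration transitions level N-1 from transfer-dst to transfer-src,
  // then blits it down into level N, which is still in transfer-dst.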
  for (size_t mip_level = 1u; mip_level < mip_count; mip_level++) {
    barrier.subresourceRange.baseMipLevel = mip_level - 1;
    barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
    barrier.newLayout = vk::ImageLayout::eTransferSrcOptimal;
    barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
    barrier.dstAccessMask = vk::AccessFlagBits::eTransferRead;

    cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
                        vk::PipelineStageFlagBits::eTransfer, {}, {}, {},
                        barrier);

    vk::ImageBlit blit;
    blit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
    blit.srcSubresource.baseArrayLayer = 0u;
    blit.srcSubresource.layerCount = 1u;
    blit.srcSubresource.mipLevel = mip_level - 1;

    blit.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
    blit.dstSubresource.baseArrayLayer = 0u;
    blit.dstSubresource.layerCount = 1u;
    blit.dstSubresource.mipLevel = mip_level;

    // offsets[0] is zero-initialized; offsets[1] is the exclusive extent of
    // each level, halving per mip and clamped to at least one texel.
    blit.srcOffsets[1].x =
        std::max<int32_t>(size.width >> (mip_level - 1), 1u);
    blit.srcOffsets[1].y =
        std::max<int32_t>(size.height >> (mip_level - 1), 1u);
    blit.srcOffsets[1].z = 1u;

    blit.dstOffsets[1].x = std::max<int32_t>(size.width >> mip_level, 1u);
    blit.dstOffsets[1].y = std::max<int32_t>(size.height >> mip_level, 1u);
    blit.dstOffsets[1].z = 1u;

    cmd.blitImage(image, vk::ImageLayout::eTransferSrcOptimal, image,
                  vk::ImageLayout::eTransferDstOptimal, blit,
                  vk::Filter::eLinear);
  }
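
  // The loop leaves the last level in transfer-dst; move it to transfer-src
  // so every level shares one layout before the final transition.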
  barrier.subresourceRange.baseMipLevel = mip_count - 1;
  barrier.subresourceRange.levelCount = 1;
  barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
  barrier.newLayout = vk::ImageLayout::eTransferSrcOptimal;
  barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
  barrier.dstAccessMask = vk::AccessFlagBits::eTransferRead;

  cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
                      vk::PipelineStageFlagBits::eTransfer, {}, {}, {},
                      barrier);
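
  // Transition every level to shader-read so the texture can be sampled.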
  barrier.subresourceRange.baseMipLevel = 0;
  barrier.subresourceRange.levelCount = mip_count;
  barrier.oldLayout = vk::ImageLayout::eTransferSrcOptimal;
  barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
  barrier.srcAccessMask = vk::AccessFlagBits::eTransferRead;
  barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;

  cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
                      vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
                      barrier);

  // Keep the CPU-side layout tracking in sync with the barriers above.
  src.SetLayoutWithoutEncoding(vk::ImageLayout::eShaderReadOnlyOptimal);
  src.SetMipMapGenerated();

  return true;
}

}  // namespace impeller