Flutter Engine
blit_pass_vk.cc
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "impeller/renderer/backend/vulkan/blit_pass_vk.h"

#include "impeller/base/validation.h"
#include "impeller/renderer/backend/vulkan/barrier_vk.h"
#include "impeller/renderer/backend/vulkan/command_buffer_vk.h"
#include "impeller/renderer/backend/vulkan/command_encoder_vk.h"
#include "impeller/renderer/backend/vulkan/device_buffer_vk.h"
#include "impeller/renderer/backend/vulkan/texture_vk.h"
#include "vulkan/vulkan_core.h"
#include "vulkan/vulkan_enums.hpp"
#include "vulkan/vulkan_structs.hpp"

namespace impeller {

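// Records an image memory barrier that transitions `mip_level_count` mip
// levels of `image`, starting at `base_mip_level`, from `old_layout` to
// `new_layout`. The barrier is skipped entirely when the two layouts already
// match.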
static void InsertImageMemoryBarrier(const vk::CommandBuffer& cmd,
                                     const vk::Image& image,
                                     vk::AccessFlags src_access_mask,
                                     vk::AccessFlags dst_access_mask,
                                     vk::ImageLayout old_layout,
                                     vk::ImageLayout new_layout,
                                     vk::PipelineStageFlags src_stage,
                                     vk::PipelineStageFlags dst_stage,
                                     uint32_t base_mip_level,
                                     uint32_t mip_level_count = 1u) {
  if (old_layout == new_layout) {
    return;
  }

  vk::ImageMemoryBarrier barrier;
  barrier.srcAccessMask = src_access_mask;
  barrier.dstAccessMask = dst_access_mask;
  barrier.oldLayout = old_layout;
  barrier.newLayout = new_layout;
  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.image = image;
  barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
  barrier.subresourceRange.baseMipLevel = base_mip_level;
  barrier.subresourceRange.levelCount = mip_level_count;
  barrier.subresourceRange.baseArrayLayer = 0u;
  barrier.subresourceRange.layerCount = 1u;

  cmd.pipelineBarrier(src_stage, dst_stage, {}, nullptr, nullptr, barrier);
}

BlitPassVK::BlitPassVK(std::shared_ptr<CommandBufferVK> command_buffer)
    : command_buffer_(std::move(command_buffer)) {}

BlitPassVK::~BlitPassVK() = default;

void BlitPassVK::OnSetLabel(std::string label) {
  if (label.empty()) {
    return;
  }
  label_ = std::move(label);
}

// |BlitPass|
bool BlitPassVK::IsValid() const {
  return true;
}

// |BlitPass|
bool BlitPassVK::EncodeCommands(
    const std::shared_ptr<Allocator>& transients_allocator) const {
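  // Blit commands are recorded directly into the Vulkan command buffer as
  // they are added to this pass, so there is nothing further to encode here.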
  return true;
}

// |BlitPass|
bool BlitPassVK::OnCopyTextureToTextureCommand(
    std::shared_ptr<Texture> source,
    std::shared_ptr<Texture> destination,
    IRect source_region,
    IPoint destination_origin,
    std::string label) {
  auto& encoder = *command_buffer_->GetEncoder();
  const auto& cmd_buffer = encoder.GetCommandBuffer();

  const auto& src = TextureVK::Cast(*source);
  const auto& dst = TextureVK::Cast(*destination);

  if (!encoder.Track(source) || !encoder.Track(destination)) {
    return false;
  }

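  // Transition the source image into a transfer-source layout, waiting on any
  // prior transfer, shader, or color-attachment writes to it.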
  BarrierVK src_barrier;
  src_barrier.cmd_buffer = cmd_buffer;
  src_barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
  src_barrier.src_access = vk::AccessFlagBits::eTransferWrite |
                           vk::AccessFlagBits::eShaderWrite |
                           vk::AccessFlagBits::eColorAttachmentWrite;
  src_barrier.src_stage = vk::PipelineStageFlagBits::eTransfer |
                          vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eColorAttachmentOutput;
  src_barrier.dst_access = vk::AccessFlagBits::eTransferRead;
  src_barrier.dst_stage = vk::PipelineStageFlagBits::eTransfer;

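  // Transition the destination image into a transfer-destination layout; no
  // prior work on it needs to be awaited.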
  BarrierVK dst_barrier;
  dst_barrier.cmd_buffer = cmd_buffer;
  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
  dst_barrier.src_access = {};
  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  dst_barrier.dst_access =
      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eTransfer;

  if (!src.SetLayout(src_barrier) || !dst.SetLayout(dst_barrier)) {
    VALIDATION_LOG << "Could not complete layout transitions.";
    return false;
  }

  vk::ImageCopy image_copy;

  image_copy.setSrcSubresource(
      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
  image_copy.setDstSubresource(
      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));

  image_copy.srcOffset =
      vk::Offset3D(source_region.GetX(), source_region.GetY(), 0);
  image_copy.dstOffset =
      vk::Offset3D(destination_origin.x, destination_origin.y, 0);
  image_copy.extent =
      vk::Extent3D(source_region.GetWidth(), source_region.GetHeight(), 1);

  // Issue the copy command now that the images are already in the right
  // layouts.
  cmd_buffer.copyImage(src.GetImage(),          //
                       src_barrier.new_layout,  //
                       dst.GetImage(),          //
                       dst_barrier.new_layout,  //
                       image_copy               //
  );

  // If this is an onscreen texture, do not transition the layout
  // back to shader read.
  if (dst.IsSwapchainImage()) {
    return true;
  }

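  // Otherwise, transition the destination back into a shader-read layout so
  // it can be sampled in later render passes.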
  BarrierVK barrier;
  barrier.cmd_buffer = cmd_buffer;
  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
  barrier.src_access = {};
  barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
  barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

  return dst.SetLayout(barrier);
}

// |BlitPass|
bool BlitPassVK::OnCopyTextureToBufferCommand(
    std::shared_ptr<Texture> source,
    std::shared_ptr<DeviceBuffer> destination,
    IRect source_region,
    size_t destination_offset,
    std::string label) {
  auto& encoder = *command_buffer_->GetEncoder();
  const auto& cmd_buffer = encoder.GetCommandBuffer();

  // Cast the source to its Vulkan backing type.
  const auto& src = TextureVK::Cast(*source);

  if (!encoder.Track(source) || !encoder.Track(destination)) {
    return false;
  }

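  // The source image must be in a transfer-source layout before its contents
  // can be copied into the buffer; wait for any pending writes to it first.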
  BarrierVK barrier;
  barrier.cmd_buffer = cmd_buffer;
  barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
  barrier.src_access = vk::AccessFlagBits::eShaderWrite |
                       vk::AccessFlagBits::eTransferWrite |
                       vk::AccessFlagBits::eColorAttachmentWrite;
  barrier.src_stage = vk::PipelineStageFlagBits::eFragmentShader |
                      vk::PipelineStageFlagBits::eTransfer |
                      vk::PipelineStageFlagBits::eColorAttachmentOutput;
  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
  barrier.dst_stage = vk::PipelineStageFlagBits::eVertexShader |
                      vk::PipelineStageFlagBits::eFragmentShader;

  const auto& dst = DeviceBufferVK::Cast(*destination);

  vk::BufferImageCopy image_copy;
  image_copy.setBufferOffset(destination_offset);
  image_copy.setBufferRowLength(0);
  image_copy.setBufferImageHeight(0);
  image_copy.setImageSubresource(
      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
  image_copy.setImageOffset(
      vk::Offset3D(source_region.GetX(), source_region.GetY(), 0));
  image_copy.setImageExtent(
      vk::Extent3D(source_region.GetWidth(), source_region.GetHeight(), 1));

  if (!src.SetLayout(barrier)) {
    VALIDATION_LOG << "Could not encode layout transition.";
    return false;
  }

  cmd_buffer.copyImageToBuffer(src.GetImage(),      //
                               barrier.new_layout,  //
                               dst.GetBuffer(),     //
                               image_copy           //
  );

  // If the buffer is used for readback, then apply a transfer -> host memory
  // barrier.
  if (destination->GetDeviceBufferDescriptor().readback) {
    vk::MemoryBarrier barrier;
    barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
    barrier.dstAccessMask = vk::AccessFlagBits::eHostRead;

    cmd_buffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
                               vk::PipelineStageFlagBits::eHost, {}, 1,
                               &barrier, 0, {}, 0, {});
  }

  return true;
}

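// Transitions the texture into a shader-read-only layout once any pending
// transfer writes to it have completed, so it can be sampled by fragment
// shaders.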
bool BlitPassVK::ConvertTextureToShaderRead(
    const std::shared_ptr<Texture>& texture) {
  auto& encoder = *command_buffer_->GetEncoder();
  const auto& cmd_buffer = encoder.GetCommandBuffer();

  BarrierVK barrier;
  barrier.cmd_buffer = cmd_buffer;
  barrier.src_access = vk::AccessFlagBits::eTransferWrite;
  barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
  barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;

  const auto& texture_vk = TextureVK::Cast(*texture);

  if (!encoder.Track(texture)) {
    return false;
  }

  return texture_vk.SetLayout(barrier);
}

// |BlitPass|
bool BlitPassVK::OnCopyBufferToTextureCommand(
    BufferView source,
    std::shared_ptr<Texture> destination,
    IRect destination_region,
    std::string label,
    uint32_t slice,
    bool convert_to_read) {
  auto& encoder = *command_buffer_->GetEncoder();
  const auto& cmd_buffer = encoder.GetCommandBuffer();

  // Cast the destination and source to their Vulkan backing types.
  const auto& dst = TextureVK::Cast(*destination);
  const auto& src = DeviceBufferVK::Cast(*source.buffer);

  if (!encoder.Track(source.buffer) || !encoder.Track(destination)) {
    return false;
  }

  BarrierVK dst_barrier;
  dst_barrier.cmd_buffer = cmd_buffer;
  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
  dst_barrier.src_access = {};
  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  dst_barrier.dst_access =
      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eTransfer;

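  // A bufferRowLength/bufferImageHeight of zero tells Vulkan that the buffer
  // contents are tightly packed according to the image extent.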
  vk::BufferImageCopy image_copy;
  image_copy.setBufferOffset(source.range.offset);
  image_copy.setBufferRowLength(0);
  image_copy.setBufferImageHeight(0);
  image_copy.setImageSubresource(
      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
  image_copy.imageOffset.x = destination_region.GetX();
  image_copy.imageOffset.y = destination_region.GetY();
  image_copy.imageOffset.z = 0u;
  image_copy.imageExtent.width = destination_region.GetWidth();
  image_copy.imageExtent.height = destination_region.GetHeight();
  image_copy.imageExtent.depth = 1u;

  // Note: this barrier should do nothing if we're already in the transfer dst
  // optimal state. This is important for performance of repeated blit pass
  // encoding.
  if (!dst.SetLayout(dst_barrier)) {
    VALIDATION_LOG << "Could not encode layout transition.";
    return false;
  }

  cmd_buffer.copyBufferToImage(src.GetBuffer(),         //
                               dst.GetImage(),          //
                               dst_barrier.new_layout,  //
                               image_copy               //
  );

  // Transition to shader-read.
  if (convert_to_read) {
    BarrierVK barrier;
    barrier.cmd_buffer = cmd_buffer;
    barrier.src_access = vk::AccessFlagBits::eTransferWrite;
    barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
    barrier.dst_access = vk::AccessFlagBits::eShaderRead;
    barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

    barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;

    if (!dst.SetLayout(barrier)) {
      return false;
    }
  }

  return true;
}

// |BlitPass|
bool BlitPassVK::OnGenerateMipmapCommand(std::shared_ptr<Texture> texture,
                                         std::string label) {
  auto& encoder = *command_buffer_->GetEncoder();
  auto& src = TextureVK::Cast(*texture);

  const auto size = src.GetTextureDescriptor().size;
  uint32_t mip_count = src.GetTextureDescriptor().mip_count;

  if (mip_count < 2u) {
    return true;
  }

  const auto& image = src.GetImage();
  const auto& cmd = encoder.GetCommandBuffer();

  if (!encoder.Track(texture)) {
    return false;
  }

  // Initialize all mip levels to be in TransferDst mode. Later, in a loop,
  // after writing to a mip level, we first switch its layout to TransferSrc so
  // it can serve as the source of the blit into the next level, before finally
  // switching it to ShaderReadOnly so it's available for sampling in a shader.
  InsertImageMemoryBarrier(
      /*cmd=*/cmd,
      /*image=*/image,
      /*src_access_mask=*/vk::AccessFlagBits::eTransferWrite |
          vk::AccessFlagBits::eColorAttachmentWrite,
      /*dst_access_mask=*/vk::AccessFlagBits::eTransferRead,
      /*old_layout=*/src.GetLayout(),
      /*new_layout=*/vk::ImageLayout::eTransferDstOptimal,
      /*src_stage=*/vk::PipelineStageFlagBits::eTransfer |
          vk::PipelineStageFlagBits::eColorAttachmentOutput,
      /*dst_stage=*/vk::PipelineStageFlagBits::eTransfer,
      /*base_mip_level=*/0u,
      /*mip_level_count=*/mip_count);

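  // This barrier is reused for every per-level transition below; only the mip
  // level, layouts, and access masks change between uses.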
  vk::ImageMemoryBarrier barrier;
  barrier.image = image;
  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
  barrier.subresourceRange.baseArrayLayer = 0;
  barrier.subresourceRange.layerCount = 1;
  barrier.subresourceRange.levelCount = 1;

  // Blit from the mip level N - 1 to mip level N.
  size_t width = size.width;
  size_t height = size.height;
  for (size_t mip_level = 1u; mip_level < mip_count; mip_level++) {
    barrier.subresourceRange.baseMipLevel = mip_level - 1;
    barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
    barrier.newLayout = vk::ImageLayout::eTransferSrcOptimal;
    barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
    barrier.dstAccessMask = vk::AccessFlagBits::eTransferRead;

    // We just finished writing to the previous (N-1) mip level, or it is the
    // base mip level. These were initialized to TransferDst earlier. We are
    // now going to read from it in order to write the current level (N), so
    // it must be converted to TransferSrc.
    cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
                        vk::PipelineStageFlagBits::eTransfer, {}, {}, {},
                        {barrier});

    vk::ImageBlit blit;
    blit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
    blit.srcSubresource.baseArrayLayer = 0u;
    blit.srcSubresource.layerCount = 1u;
    blit.srcSubresource.mipLevel = mip_level - 1;

    blit.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
    blit.dstSubresource.baseArrayLayer = 0u;
    blit.dstSubresource.layerCount = 1u;
    blit.dstSubresource.mipLevel = mip_level;

    // offsets[0] is origin.
    blit.srcOffsets[1].x = std::max<int32_t>(width, 1u);
    blit.srcOffsets[1].y = std::max<int32_t>(height, 1u);
    blit.srcOffsets[1].z = 1u;

    width = width / 2;
    height = height / 2;

    // offsets[0] is origin.
    blit.dstOffsets[1].x = std::max<int32_t>(width, 1u);
    blit.dstOffsets[1].y = std::max<int32_t>(height, 1u);
    blit.dstOffsets[1].z = 1u;

    cmd.blitImage(image,                                 // src image
                  vk::ImageLayout::eTransferSrcOptimal,  // src layout
                  image,                                 // dst image
                  vk::ImageLayout::eTransferDstOptimal,  // dst layout
                  1u,                                    // region count
                  &blit,                                 // regions
                  vk::Filter::eLinear                    // filter
    );

    barrier.oldLayout = vk::ImageLayout::eTransferSrcOptimal;
    barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
    barrier.srcAccessMask = vk::AccessFlagBits::eTransferRead;
    barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;

    // Now that the blit is done, the previous level (N-1) is no longer read
    // from (TransferSrc). Prepare it to be read from a shader
    // (ShaderReadOnly).
    cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
                        vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
                        {barrier});
  }

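  // The last mip level was written to but never used as a blit source, so it
  // is still in TransferDst; transition it to ShaderReadOnly as well.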
  barrier.subresourceRange.baseMipLevel = mip_count - 1;
  barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
  barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
  barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
  barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;

  cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
                      vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
                      {barrier});

  // We modified the layouts of this image from underneath it. Tell it its new
  // state so it doesn't try to perform redundant transitions under the hood.
  src.SetLayoutWithoutEncoding(vk::ImageLayout::eShaderReadOnlyOptimal);
  src.SetMipMapGenerated();

  return true;
}

}  // namespace impeller