#include "impeller/renderer/backend/vulkan/compute_pass_vk.h"

#include <algorithm>

#include "impeller/renderer/backend/vulkan/command_buffer_vk.h"
#include "impeller/renderer/backend/vulkan/compute_pipeline_vk.h"
#include "impeller/renderer/backend/vulkan/device_buffer_vk.h"
#include "impeller/renderer/backend/vulkan/formats_vk.h"
#include "impeller/renderer/backend/vulkan/sampler_vk.h"
#include "impeller/renderer/backend/vulkan/texture_vk.h"
#include "vulkan/vulkan_structs.hpp"

namespace impeller {
ComputePassVK::ComputePassVK(std::shared_ptr<const Context> context,
                             std::shared_ptr<CommandBufferVK> command_buffer)
    : ComputePass(std::move(context)),
      command_buffer_(std::move(command_buffer)) {
  // Cache the device's maximum compute workgroup size; Compute() uses it to
  // clamp oversized dispatch grids.
  max_wg_size_ = ContextVK::Cast(*context_)
                     .GetPhysicalDevice()
                     .getProperties()
                     .limits.maxComputeWorkGroupSize;
  is_valid_ = true;
}

ComputePassVK::~ComputePassVK() = default;

bool ComputePassVK::IsValid() const {
  return is_valid_;
}

void ComputePassVK::OnSetLabel(const std::string& label) {
  if (label.empty()) {
    return;
  }
  label_ = label;
}

// |ComputePass|
void ComputePassVK::SetCommandLabel(std::string_view label) {
#ifdef IMPELLER_DEBUG
  command_buffer_->GetEncoder()->PushDebugGroup(label);
  has_label_ = true;
#endif  // IMPELLER_DEBUG
}

// |ComputePass|
void ComputePassVK::SetPipeline(
    const std::shared_ptr<Pipeline<ComputePipelineDescriptor>>& pipeline) {
  const auto& pipeline_vk = ComputePipelineVK::Cast(*pipeline);
  const vk::CommandBuffer& command_buffer_vk =
      command_buffer_->GetEncoder()->GetCommandBuffer();
  command_buffer_vk.bindPipeline(vk::PipelineBindPoint::eCompute,
                                 pipeline_vk.GetPipeline());
  pipeline_layout_ = pipeline_vk.GetPipelineLayout();

  auto descriptor_result =
      command_buffer_->GetEncoder()->AllocateDescriptorSets(
          pipeline_vk.GetDescriptorSetLayout(), ContextVK::Cast(*context_));
  if (!descriptor_result.ok()) {
    return;
  }
  descriptor_set_ = descriptor_result.value();
  pipeline_valid_ = true;
}
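
// A fresh descriptor set is allocated from the encoder's pool on every
// SetPipeline call; pipeline_valid_ gates Compute() so that a dispatch is
// never recorded against a failed allocation.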

// |ComputePass|
fml::Status ComputePassVK::Compute(const ISize& grid_size) {
  if (grid_size.IsEmpty() || !pipeline_valid_) {
    bound_image_offset_ = 0u;
    bound_buffer_offset_ = 0u;
    descriptor_write_offset_ = 0u;
    has_label_ = false;
    pipeline_valid_ = false;
    return fml::Status(fml::StatusCode::kCancelled,
                       "Invalid pipeline or empty grid.");
  }
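
  // The descriptor writes staged by BindResource were recorded before the
  // descriptor set was known, so patch dstSet on each of them and flush them
  // all in a single updateDescriptorSets call.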
  const ContextVK& context_vk = ContextVK::Cast(*context_);
  for (auto i = 0u; i < descriptor_write_offset_; i++) {
    write_workspace_[i].dstSet = descriptor_set_;
  }

  context_vk.GetDevice().updateDescriptorSets(descriptor_write_offset_,
                                              write_workspace_.data(), 0u, {});
  const vk::CommandBuffer& command_buffer_vk =
      command_buffer_->GetEncoder()->GetCommandBuffer();

  command_buffer_vk.bindDescriptorSets(
      vk::PipelineBindPoint::eCompute,  // bind point
      pipeline_layout_,                 // layout
      0,                                // first set
      1,                                // set count
      &descriptor_set_,                 // sets
      0,                                // dynamic offset count
      nullptr                           // dynamic offsets
  );
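
  // Determine the dispatch dimensions. max_wg_size_ was captured from the
  // device limits in the constructor.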
  int64_t width = grid_size.width;
  int64_t height = grid_size.height;

  // Special case for linear (1-D) processing.
  if (height == 1) {
    command_buffer_vk.dispatch(width, 1, 1);
  } else {
    // Clamp oversized grids by repeatedly halving each dimension until it
    // fits within the maximum workgroup size.
    while (width > max_wg_size_[0]) {
      width = std::max(static_cast<int64_t>(1), width / 2);
    }
    while (height > max_wg_size_[1]) {
      height = std::max(static_cast<int64_t>(1), height / 2);
    }
    command_buffer_vk.dispatch(width, height, 1);
  }

#ifdef IMPELLER_DEBUG
  if (has_label_) {
    command_buffer_->GetEncoder()->PopDebugGroup();
  }
  has_label_ = false;
#endif  // IMPELLER_DEBUG

  bound_image_offset_ = 0u;
  bound_buffer_offset_ = 0u;
  descriptor_write_offset_ = 0u;
  pipeline_valid_ = false;

  return fml::Status();
}
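
// Typical usage, sketched under the assumption that the pipeline and buffer
// view have already been created (the names below are illustrative only):
//
//   pass->SetPipeline(compute_pipeline);
//   pass->BindResource(0u, DescriptorType::kStorageBuffer, buffer_view);
//   pass->Compute(ISize(1024, 1));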

// |ResourceBinder|
bool ComputePassVK::BindResource(ShaderStage stage,
                                 DescriptorType type,
                                 const ShaderUniformSlot& slot,
                                 const ShaderMetadata& metadata,
                                 BufferView view) {
  // Only the binding index, descriptor type, and buffer view are needed
  // here; stage and metadata are unused by the Vulkan backend.
  return BindResource(slot.binding, type, view);
}
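
// The image overload below stages a sampled-image binding: the sampler and
// image-view info is copied into image_workspace_ and a matching
// vk::WriteDescriptorSet is queued in write_workspace_ for the flush in
// Compute().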

// |ResourceBinder|
bool ComputePassVK::BindResource(
    ShaderStage stage,
    DescriptorType type,
    const SampledImageSlot& slot,
    const ShaderMetadata& metadata,
    std::shared_ptr<const Texture> texture,
    const std::unique_ptr<const Sampler>& sampler) {
  if (bound_image_offset_ >= kMaxBindings) {
    return false;
  }
  if (!texture->IsValid() || !sampler) {
    return false;
  }
  const TextureVK& texture_vk = TextureVK::Cast(*texture);
  const SamplerVK& sampler_vk = SamplerVK::Cast(*sampler);

  // Keep the texture alive until the command buffer has executed.
  if (!command_buffer_->GetEncoder()->Track(texture)) {
    return false;
  }
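
  // The write set stores a pointer into image_workspace_; the workspace
  // arrays are fixed-size members, so the pointer stays valid until the
  // writes are flushed in Compute().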
  vk::DescriptorImageInfo image_info;
  image_info.imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
  image_info.sampler = sampler_vk.GetSampler();
  image_info.imageView = texture_vk.GetImageView();
  image_workspace_[bound_image_offset_++] = image_info;

  vk::WriteDescriptorSet write_set;
  write_set.dstBinding = slot.binding;
  write_set.descriptorCount = 1u;
  write_set.descriptorType = ToVKDescriptorType(type);
  write_set.pImageInfo = &image_workspace_[bound_image_offset_ - 1];

  write_workspace_[descriptor_write_offset_++] = write_set;
  return true;
}

bool ComputePassVK::BindResource(size_t binding,
                                 DescriptorType type,
                                 const BufferView& view) {
  if (bound_buffer_offset_ >= kMaxBindings) {
    return false;
  }

  const std::shared_ptr<const DeviceBuffer>& device_buffer = view.buffer;
  auto buffer = DeviceBufferVK::Cast(*device_buffer).GetBuffer();
  if (!buffer) {
    return false;
  }

  // Keep the buffer alive until the command buffer has executed.
  if (!command_buffer_->GetEncoder()->Track(device_buffer)) {
    return false;
  }

  uint32_t offset = view.range.offset;

  vk::DescriptorBufferInfo buffer_info;
  buffer_info.buffer = buffer;
  buffer_info.offset = offset;
  buffer_info.range = view.range.length;
  buffer_workspace_[bound_buffer_offset_++] = buffer_info;

  vk::WriteDescriptorSet write_set;
  write_set.dstBinding = binding;
  write_set.descriptorCount = 1u;
  write_set.descriptorType = ToVKDescriptorType(type);
  write_set.pBufferInfo = &buffer_workspace_[bound_buffer_offset_ - 1];

  write_workspace_[descriptor_write_offset_++] = write_set;
  return true;
}
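
// Note: only global memory barriers are used below. The Khronos
// synchronization examples suggest that barriers more finely grained than a
// global memory barrier are often weakened into a global barrier by
// implementations; confirming this on mobile devices would require
// experimentation.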

// |ComputePass|
void ComputePassVK::AddBufferMemoryBarrier() {
  vk::MemoryBarrier barrier;
  barrier.srcAccessMask = vk::AccessFlagBits::eShaderWrite;
  barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;

  command_buffer_->GetEncoder()->GetCommandBuffer().pipelineBarrier(
      vk::PipelineStageFlagBits::eComputeShader,
      vk::PipelineStageFlagBits::eComputeShader, {}, 1, &barrier, 0, {}, 0, {});
}
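
// The texture variant is currently identical to the buffer variant: a global
// compute-to-compute barrier ordering shader writes before shader reads.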

// |ComputePass|
void ComputePassVK::AddTextureMemoryBarrier() {
  vk::MemoryBarrier barrier;
  barrier.srcAccessMask = vk::AccessFlagBits::eShaderWrite;
  barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;

  command_buffer_->GetEncoder()->GetCommandBuffer().pipelineBarrier(
      vk::PipelineStageFlagBits::eComputeShader,
      vk::PipelineStageFlagBits::eComputeShader, {}, 1, &barrier, 0, {}, 0, {});
}
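
// Because only global memory barriers are used, compute-to-compute
// dependencies across command buffers are already covered. EncodeCommands
// pessimistically assumes the compute output will next be consumed by the
// vertex stage of a render pass.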

// |ComputePass|
bool ComputePassVK::EncodeCommands() const {
  vk::MemoryBarrier barrier;
  barrier.srcAccessMask = vk::AccessFlagBits::eShaderWrite;
  barrier.dstAccessMask =
      vk::AccessFlagBits::eIndexRead | vk::AccessFlagBits::eVertexAttributeRead;

  command_buffer_->GetEncoder()->GetCommandBuffer().pipelineBarrier(
      vk::PipelineStageFlagBits::eComputeShader,
      vk::PipelineStageFlagBits::eVertexInput, {}, 1, &barrier, 0, {}, 0, {});

  return true;
}

}  // namespace impeller