12#include "vulkan/vulkan_structs.hpp"
// Constructs a compute pass that records into the given Vulkan command
// buffer. Takes shared ownership of both the context and the command buffer
// wrapper.
// NOTE(review): the embedded original-file numbering jumps (19 -> 22 and
// 22 -> 25), so the accessor chain between ContextVK::Cast(*context_) and
// .limits below has been elided by extraction — restore from upstream.
16ComputePassVK::ComputePassVK(std::shared_ptr<const Context> context,
17 std::shared_ptr<CommandBufferVK> command_buffer)
18 : ComputePass(
std::move(context)),
19 command_buffer_(
std::move(command_buffer)) {
// Cache the device's maximum compute workgroup dimensions; the dispatch
// path later halves grid dimensions until they fit these limits.
22 max_wg_size_ = ContextVK::Cast(*context_)
25 .limits.maxComputeWorkGroupSize;
29ComputePassVK::~ComputePassVK() =
default;
// Reports whether this pass is in a usable state for recording.
// NOTE(review): the function body (original lines 32-33) is elided in this
// chunk; cannot tell from here which member it checks — confirm upstream.
31bool ComputePassVK::IsValid()
const {
// Backend hook invoked when a debug label is assigned to the pass.
// NOTE(review): the body (original lines after 35) is elided by extraction —
// confirm against the upstream file.
35void ComputePassVK::OnSetLabel(
const std::string& label) {
// Pushes a debug group onto the encoder so the commands recorded by this
// pass appear under `label` in debugging tools.
43void ComputePassVK::SetCommandLabel(std::string_view label) {
45 command_buffer_->GetEncoder()->PushDebugGroup(label);
// NOTE(review): the rest of this function (original line 46 onward) is
// elided; the matching PopDebugGroup appears later in the dispatch path.
// Binds the given compute pipeline on the pass's command buffer and
// allocates the descriptor set that subsequent BindResource calls target.
// NOTE(review): the parameter list (original line 52), the body of the
// !ok() branch (lines 64-65), and closing braces are elided in this chunk.
51void ComputePassVK::SetPipeline(
53 const auto& pipeline_vk = ComputePipelineVK::Cast(*pipeline);
54 const vk::CommandBuffer& command_buffer_vk =
55 command_buffer_->GetEncoder()->GetCommandBuffer();
// Bind at the compute bind point and remember the layout; the layout is
// needed later when the descriptor set is bound in the dispatch path.
56 command_buffer_vk.bindPipeline(vk::PipelineBindPoint::eCompute,
57 pipeline_vk.GetPipeline());
58 pipeline_layout_ = pipeline_vk.GetPipelineLayout();
// Allocate the descriptor set for this pipeline's layout from the encoder.
60 auto descriptor_result =
61 command_buffer_->GetEncoder()->AllocateDescriptorSets(
62 pipeline_vk.GetDescriptorSetLayout(), ContextVK::Cast(*context_));
63 if (!descriptor_result.ok()) {
66 descriptor_set_ = descriptor_result.value();
// The dispatch path refuses to record while pipeline_valid_ is false.
67 pipeline_valid_ =
true;
// Dispatch path (the function header, original line 71, is not visible in
// this chunk). Validates state, flushes the staged descriptor writes, binds
// the descriptor set, records the dispatch, and resets binding state.
72 if (grid_size.
IsEmpty() || !pipeline_valid_) {
// Reset all binding cursors so a rejected dispatch leaves no stale state.
73 bound_image_offset_ = 0u;
74 bound_buffer_offset_ = 0u;
75 descriptor_write_offset_ = 0u;
77 pipeline_valid_ =
false;
// NOTE(review): the statement this string literal belongs to (original
// line 78, presumably an error-status return) is elided.
79 "Invalid pipeline or empty grid.");
// Point every staged descriptor write at the set allocated in SetPipeline,
// then flush them all with a single updateDescriptorSets call.
82 const ContextVK& context_vk = ContextVK::Cast(*context_);
83 for (
auto i = 0u; i < descriptor_write_offset_; i++) {
84 write_workspace_[i].dstSet = descriptor_set_;
87 context_vk.
GetDevice().updateDescriptorSets(descriptor_write_offset_,
88 write_workspace_.data(), 0u, {});
89 const vk::CommandBuffer& command_buffer_vk =
90 command_buffer_->GetEncoder()->GetCommandBuffer();
92 command_buffer_vk.bindDescriptorSets(
// NOTE(review): the remaining bindDescriptorSets arguments and the
// width/height setup (original lines 94-106) are elided in this chunk.
93 vk::PipelineBindPoint::eCompute,
107 command_buffer_vk.dispatch(
width, 1, 1);
// Halve each grid dimension (never below 1) until it fits the device's
// maxComputeWorkGroupSize limits cached in the constructor.
109 while (
width > max_wg_size_[0]) {
110 width = std::max(
static_cast<int64_t
>(1),
width / 2);
112 while (
height > max_wg_size_[1]) {
// NOTE(review): the height-clamp body and its following dispatch
// (original lines 113-119) are elided.
120 command_buffer_->GetEncoder()->PopDebugGroup();
// Reset binding state so the pass is ready for the next SetPipeline /
// dispatch sequence.
125 bound_image_offset_ = 0u;
126 bound_buffer_offset_ = 0u;
127 descriptor_write_offset_ = 0u;
129 pipeline_valid_ =
false;
// Stages a combined image/sampler binding into the local workspace arrays;
// the actual vkUpdateDescriptorSets happens later in the dispatch path.
// NOTE(review): several parameters (original lines 145-148), the bodies of
// the early-return guards, and sampler/image-view assignments (lines
// 166-167) are elided in this chunk.
144bool ComputePassVK::BindResource(
149 std::shared_ptr<const Texture>
texture,
150 const std::unique_ptr<const Sampler>& sampler) {
// Reject once the fixed-size binding workspace is exhausted.
151 if (bound_image_offset_ >= kMaxBindings) {
154 if (!
texture->IsValid() || !sampler) {
158 const SamplerVK& sampler_vk = SamplerVK::Cast(*sampler);
// Track keeps the texture alive for the command buffer's lifetime.
160 if (!command_buffer_->GetEncoder()->Track(
texture)) {
164 vk::DescriptorImageInfo image_info;
165 image_info.imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
168 image_workspace_[bound_image_offset_++] = image_info;
170 vk::WriteDescriptorSet write_set;
171 write_set.dstBinding = slot.
binding;
172 write_set.descriptorCount = 1u;
// Points into image_workspace_, which outlives the staged write.
174 write_set.pImageInfo = &image_workspace_[bound_image_offset_ - 1];
176 write_workspace_[descriptor_write_offset_++] = write_set;
// Stages a buffer binding into the local workspace arrays; flushed later by
// the dispatch path's updateDescriptorSets call.
// NOTE(review): the remaining parameters (original lines 181-182), the
// early-return bodies, and the buffer_info.range assignment (line 202)
// are elided in this chunk.
180bool ComputePassVK::BindResource(
size_t binding,
// Reject once the fixed-size binding workspace is exhausted.
183 if (bound_buffer_offset_ >= kMaxBindings) {
187 const std::shared_ptr<const DeviceBuffer>& device_buffer = view.
buffer;
188 auto buffer = DeviceBufferVK::Cast(*device_buffer).GetBuffer();
// Track keeps the device buffer alive for the command buffer's lifetime.
193 if (!command_buffer_->GetEncoder()->Track(device_buffer)) {
199 vk::DescriptorBufferInfo buffer_info;
200 buffer_info.buffer =
buffer;
201 buffer_info.offset =
offset;
203 buffer_workspace_[bound_buffer_offset_++] = buffer_info;
205 vk::WriteDescriptorSet write_set;
206 write_set.dstBinding = binding;
207 write_set.descriptorCount = 1u;
// Points into buffer_workspace_, which outlives the staged write.
209 write_set.pBufferInfo = &buffer_workspace_[bound_buffer_offset_ - 1];
211 write_workspace_[descriptor_write_offset_++] = write_set;
222void ComputePassVK::AddBufferMemoryBarrier() {
223 vk::MemoryBarrier barrier;
224 barrier.srcAccessMask = vk::AccessFlagBits::eShaderWrite;
225 barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
227 command_buffer_->GetEncoder()->GetCommandBuffer().pipelineBarrier(
228 vk::PipelineStageFlagBits::eComputeShader,
229 vk::PipelineStageFlagBits::eComputeShader, {}, 1, &barrier, 0, {}, 0, {});
233void ComputePassVK::AddTextureMemoryBarrier() {
234 vk::MemoryBarrier barrier;
235 barrier.srcAccessMask = vk::AccessFlagBits::eShaderWrite;
236 barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
238 command_buffer_->GetEncoder()->GetCommandBuffer().pipelineBarrier(
239 vk::PipelineStageFlagBits::eComputeShader,
240 vk::PipelineStageFlagBits::eComputeShader, {}, 1, &barrier, 0, {}, 0, {});
// Finalizes the pass. The visible tail records a global barrier making
// compute shader writes available to subsequent index and vertex-attribute
// reads in the same command buffer.
// NOTE(review): the front of the body (original lines 245-252) and the
// return statement are elided in this chunk.
244bool ComputePassVK::EncodeCommands()
const {
253 vk::MemoryBarrier barrier;
254 barrier.srcAccessMask = vk::AccessFlagBits::eShaderWrite;
255 barrier.dstAccessMask =
256 vk::AccessFlagBits::eIndexRead | vk::AccessFlagBits::eVertexAttributeRead;
// Compute -> vertex-input execution dependency with the single global
// memory barrier built above.
258 command_buffer_->GetEncoder()->GetCommandBuffer().pipelineBarrier(
259 vk::PipelineStageFlagBits::eComputeShader,
260 vk::PipelineStageFlagBits::eVertexInput, {}, 1, &barrier, 0, {}, 0, {});
const vk::Device & GetDevice() const
Describes the fixed function and programmable aspects of rendering and compute operations performed b...
vk::Sampler GetSampler() const
vk::ImageView GetImageView() const
static const uint8_t buffer[]
constexpr vk::DescriptorType ToVKDescriptorType(DescriptorType type)
std::shared_ptr< const DeviceBuffer > buffer
Metadata required to bind a combined texture and sampler.
size_t binding
The Vulkan binding value.
constexpr bool IsEmpty() const
Returns true if either the width or the height is 0, negative, or NaN.