#include "vulkan/vulkan_enums.hpp"
#include "vulkan/vulkan_handles.hpp"
#include "vulkan/vulkan_structs.hpp"
// Holds a command pool and its buffers until the resource manager drops this
// object on a background thread, at which point they are reclaimed.
class BackgroundCommandPoolVK final {
 public:
  BackgroundCommandPoolVK(vk::UniqueCommandPool&& pool,
                          std::vector<vk::UniqueCommandBuffer>&& buffers,
                          size_t unused_count,
                          std::weak_ptr<CommandPoolRecyclerVK> recycler)
      : pool_(std::move(pool)),
        buffers_(std::move(buffers)),
        unused_count_(unused_count),
        recycler_(std::move(recycler)) {}

  ~BackgroundCommandPoolVK() {
    auto const recycler = recycler_.lock();
    // If the recycler is gone, the context is being destroyed and there is
    // nothing to reclaim into.
    if (!recycler) {
      return;
    }
    // Trim (free rather than recycle) the buffers if too many went unused.
    recycler->Reclaim(std::move(pool_), std::move(buffers_),
                      /*should_trim=*/unused_count_ > kUnusedCommandBufferLimit);
  }

 private:
  vk::UniqueCommandPool pool_;
  std::vector<vk::UniqueCommandBuffer> buffers_;
  const size_t unused_count_;
  std::weak_ptr<CommandPoolRecyclerVK> recycler_;
};
CommandPoolVK::~CommandPoolVK() {
  auto const context = context_.lock();
  if (!context) {
    return;
  }
  auto const recycler = context->GetCommandPoolRecycler();
  if (!recycler) {
    return;
  }

  // Move any unused buffers into the collected set so the whole batch is
  // handed to the background pool together.
  size_t unused_count = unused_command_buffers_.size();
  for (auto i = 0u; i < unused_command_buffers_.size(); i++) {
    collected_buffers_.push_back(std::move(unused_command_buffers_[i]));
  }
  unused_command_buffers_.clear();

  auto reset_pool_when_dropped = BackgroundCommandPoolVK(
      std::move(pool_), std::move(collected_buffers_), unused_count, recycler);

  UniqueResourceVKT<BackgroundCommandPoolVK> pool(
      context->GetResourceManager(), std::move(reset_pool_when_dropped));
}
vk::UniqueCommandBuffer CommandPoolVK::CreateCommandBuffer() {
  auto const context = context_.lock();
  if (!context) {
    return {};
  }

  Lock lock(pool_mutex_);
  if (!pool_) {
    return {};
  }
  // Prefer handing out a recycled buffer over allocating a new one.
  if (!unused_command_buffers_.empty()) {
    vk::UniqueCommandBuffer buffer = std::move(unused_command_buffers_.back());
    unused_command_buffers_.pop_back();
    return buffer;
  }

  auto const device = context->GetDevice();
  vk::CommandBufferAllocateInfo info;
  info.setCommandPool(pool_.get());
  info.setCommandBufferCount(1u);
  info.setLevel(vk::CommandBufferLevel::ePrimary);
  auto [result, buffers] = device.allocateCommandBuffersUnique(info);
  if (result != vk::Result::eSuccess) {
    return {};
  }
  return std::move(buffers[0]);
}
void CommandPoolVK::CollectCommandBuffer(vk::UniqueCommandBuffer&& buffer) {
  Lock lock(pool_mutex_);
  if (!pool_) {
    buffer.release();  // The pool (and thus this buffer) is already destroyed.
    return;
  }
  collected_buffers_.push_back(std::move(buffer));
}

void CommandPoolVK::Destroy() {
  Lock lock(pool_mutex_);
  pool_.reset();
  // Destroying the pool frees its buffers; release the handles so the unique
  // wrappers do not free them a second time.
  for (auto& buffer : collected_buffers_) {
    buffer.release();
  }
  for (auto& buffer : unused_command_buffers_) {
    buffer.release();
  }
  unused_command_buffers_.clear();
  collected_buffers_.clear();
}
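For orientation, a minimal usage sketch of the two entry points above; it assumes `pool` is a live `std::shared_ptr<CommandPoolVK>` obtained from the recycler, and elides recording and submission:

```cpp
// Sketch only, not an Impeller call site: allocate (or reuse) a primary
// command buffer, then hand it back so the pool retains it until the next
// background reset.
vk::UniqueCommandBuffer cmd = pool->CreateCommandBuffer();
if (cmd) {
  // ... record into *cmd and submit it; once the work has retired ...
  pool->CollectCommandBuffer(std::move(cmd));  // retained until Reclaim()
}
```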
using CommandPoolMap = std::unordered_map<uint64_t, std::shared_ptr<CommandPoolVK>>;

// Every per-thread pool created for each context, keyed by the context hash.
static std::unordered_map<
    uint64_t,
    std::unordered_map<std::thread::id, std::weak_ptr<CommandPoolVK>>>
    g_all_pools_map IPLR_GUARDED_BY(g_all_pools_map_mutex);
CommandPoolRecyclerVK::CommandPoolRecyclerVK(
    const std::shared_ptr<ContextVK>& context)
    : context_(context), context_hash_(context->GetHash()) {}
int CommandPoolRecyclerVK::GetGlobalPoolCount(const ContextVK& context) {
  Lock all_pools_lock(g_all_pools_map_mutex);
  auto it = g_all_pools_map.find(context.GetHash());
  return it != g_all_pools_map.end() ? it->second.size() : 0;
}
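Because `Get()` (below) stores pools in a thread-local map and registers them here, the count grows with the number of threads that have requested a pool for the context. A sketch of that behavior, assuming a live `recycler` (`std::shared_ptr<CommandPoolRecyclerVK>`) and `context` (`ContextVK`):

```cpp
// Sketch only: each thread that calls Get() receives its own CommandPoolVK.
std::shared_ptr<CommandPoolVK> main_pool = recycler->Get();
std::thread worker([&recycler] {
  std::shared_ptr<CommandPoolVK> worker_pool = recycler->Get();
  // worker_pool is distinct from main_pool; both are registered under the
  // context's hash in g_all_pools_map.
});
worker.join();
// Counts registered entries: one per thread that has called Get().
int pools = CommandPoolRecyclerVK::GetGlobalPoolCount(*context);
```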
std::shared_ptr<CommandPoolVK> CommandPoolRecyclerVK::Get() {
  auto const strong_context = context_.lock();
  if (!strong_context) {
    return nullptr;
  }
  // Reuse the pool this thread already created for the context, if any.
  if (!tls_command_pool_map.get()) {
    tls_command_pool_map.reset(new CommandPoolMap());
  }
  CommandPoolMap& pool_map = *tls_command_pool_map.get();
  auto const it = pool_map.find(context_hash_);
  if (it != pool_map.end()) {
    return it->second;
  }
  // Otherwise, create a new pool (recycling an old vk::CommandPool if able).
  auto data = Create();
  if (!data || !data->pool) {
    return nullptr;
  }
  auto const resource = std::make_shared<CommandPoolVK>(
      std::move(data->pool), std::move(data->buffers), context_);
  pool_map.emplace(context_hash_, resource);
  Lock all_pools_lock(g_all_pools_map_mutex);
  g_all_pools_map[context_hash_][std::this_thread::get_id()] = resource;
  return resource;
}
std::optional<CommandPoolRecyclerVK::RecycledData>
CommandPoolRecyclerVK::Create() {
  // Reuse a recycled pool and its buffers if one is available.
  if (auto data = Reuse()) {
    return data;
  }
  // Otherwise, create a new pool.
  auto context = context_.lock();
  if (!context) {
    return std::nullopt;
  }
  vk::CommandPoolCreateInfo info;
  info.setQueueFamilyIndex(context->GetGraphicsQueue()->GetIndex().family);
  info.setFlags(vk::CommandPoolCreateFlagBits::eTransient);

  auto device = context->GetDevice();
  auto [result, pool] = device.createCommandPoolUnique(info);
  if (result != vk::Result::eSuccess) {
    return std::nullopt;
  }
  return CommandPoolRecyclerVK::RecycledData{.pool = std::move(pool),
                                             .buffers = {}};
}
std::optional<CommandPoolRecyclerVK::RecycledData>
CommandPoolRecyclerVK::Reuse() {
  Lock recycled_lock(recycled_mutex_);
  if (recycled_.empty()) {
    return std::nullopt;
  }
  // Pop the most recently recycled pool.
  auto data = std::move(recycled_.back());
  recycled_.pop_back();
  return std::move(data);
}
void CommandPoolRecyclerVK::Reclaim(
    vk::UniqueCommandPool&& pool,
    std::vector<vk::UniqueCommandBuffer>&& buffers,
    bool should_trim) {
  auto strong_context = context_.lock();
  if (!strong_context) {
    return;
  }
  auto device = strong_context->GetDevice();

  // Trimming releases the pool's allocations back to the system, so the
  // recycled buffer handles cannot be reused and are dropped.
  vk::CommandPoolResetFlags flags;
  if (should_trim) {
    buffers.clear();
    flags = vk::CommandPoolResetFlagBits::eReleaseResources;
  }
  const auto result = device.resetCommandPool(pool.get(), flags);
  if (result != vk::Result::eSuccess) {
    VALIDATION_LOG << "Could not reset command pool: " << vk::to_string(result);
    return;
  }
  // Move the reset pool (and any surviving buffers) to the recycled list.
  Lock recycled_lock(recycled_mutex_);
  recycled_.push_back({.pool = std::move(pool), .buffers = std::move(buffers)});
}
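The `should_trim` flag is the memory-pressure valve here: a plain reset recycles buffers cheaply, while `eReleaseResources` also returns the pool's allocations to the system. A sketch of the distinction at the vulkan.hpp level, assuming a `vk::Device device` and `vk::UniqueCommandPool pool` compiled with `VULKAN_HPP_NO_EXCEPTIONS` (so `resetCommandPool` returns a `vk::Result`, as in the listing above):

```cpp
// Cheap reset: buffers return to the initial state but keep their backing
// memory, so recycled vk::CommandBuffer handles can be re-begun later.
vk::Result cheap = device.resetCommandPool(pool.get(), {});

// Trimming reset: the driver may return the pool's memory to the system,
// which is why Reclaim() drops the recycled buffer handles in this mode.
vk::Result trimmed = device.resetCommandPool(
    pool.get(), vk::CommandPoolResetFlagBits::eReleaseResources);
```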
void CommandPoolRecyclerVK::Dispose() {
  CommandPoolMap* pool_map = tls_command_pool_map.get();
  if (pool_map) {
    pool_map->erase(context_hash_);
  }
}

CommandPoolRecyclerVK::~CommandPoolRecyclerVK() {
  // Remove this thread's entry for the context from the global map.
  Lock all_pools_lock(g_all_pools_map_mutex);
  auto found = g_all_pools_map.find(context_hash_);
  if (found != g_all_pools_map.end()) {
    found->second.erase(std::this_thread::get_id());
  }
}
void CommandPoolRecyclerVK::DestroyThreadLocalPools() {
  Lock all_pools_lock(g_all_pools_map_mutex);
  auto found = g_all_pools_map.find(context_hash_);
  if (found != g_all_pools_map.end()) {
    for (auto& [thread_id, weak_pool] : found->second) {
      auto pool = weak_pool.lock();
      if (pool) {
        pool->Destroy();
      }
    }
    g_all_pools_map.erase(found);
  }
}
Symbol reference (from the generated documentation):

BackgroundCommandPoolVK:
  BackgroundCommandPoolVK(vk::UniqueCommandPool&& pool, std::vector<vk::UniqueCommandBuffer>&& buffers, size_t unused_count, std::weak_ptr<CommandPoolRecyclerVK> recycler)
  BackgroundCommandPoolVK(BackgroundCommandPoolVK&&) = default
  ~BackgroundCommandPoolVK()
  static constexpr size_t kUnusedCommandBufferLimit

CommandPoolRecyclerVK:
  CommandPoolRecyclerVK(const std::shared_ptr<ContextVK>& context)
      Creates a recycler for the given |ContextVK|.
  std::shared_ptr<CommandPoolVK> Get()
      Gets a command pool for the current thread.
  void Reclaim(vk::UniqueCommandPool&& pool, std::vector<vk::UniqueCommandBuffer>&& buffers, bool should_trim = false)
      Returns a command pool to be reset on a background thread.
  void Dispose()
      Clears this context's thread-local command pool.
  void DestroyThreadLocalPools()
      Cleans up resources held by all per-thread command pools associated with the context.
  static int GetGlobalPoolCount(const ContextVK& context)
      Returns the number of per-thread pools registered for the given context.

CommandPoolVK:
  vk::UniqueCommandBuffer CreateCommandBuffer()
      Creates and returns a new |vk::CommandBuffer|.
  void CollectCommandBuffer(vk::UniqueCommandBuffer&& buffer)
      Collects the given |vk::CommandBuffer| to be retained.
  void Destroy()
      Deletes all Vulkan objects in this command pool.

CommandPoolRecyclerVK::RecycledData:
      A unique command pool and zero or more recycled command buffers.
  vk::UniqueCommandPool pool
  std::vector<vk::UniqueCommandBuffer> buffers

Other symbols:
  UniqueResourceVKT
      A unique handle to a resource which will be reclaimed by the specified resource manager.
  using CommandPoolMap = std::unordered_map<uint64_t, std::shared_ptr<CommandPoolVK>>
  static Mutex g_all_pools_map_mutex
  static thread_local std::unique_ptr<CommandPoolMap> tls_command_pool_map
  #define IPLR_GUARDED_BY(x)
      Thread-safety annotation marking a variable as guarded by mutex |x|.
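Putting the pieces together, a hypothetical end-to-end pass over the recycler's lifecycle (sketch only; `context` is assumed to be a live `ContextVK`, and the resource-manager hand-off happens inside `~CommandPoolVK()` as shown in the listing):

```cpp
auto recycler = context->GetCommandPoolRecycler();

// 1. Lazily create (or recycle) this thread's pool for the context.
std::shared_ptr<CommandPoolVK> pool = recycler->Get();

// 2. Allocate command buffers; recycled ones come from the unused list.
vk::UniqueCommandBuffer cmd = pool->CreateCommandBuffer();

// 3. After submission completes, hand buffers back for retention.
pool->CollectCommandBuffer(std::move(cmd));

// 4. Clearing the thread-local entry drops the last reference; ~CommandPoolVK
//    then packages the vk::CommandPool and collected buffers into a
//    BackgroundCommandPoolVK whose destructor, run via the resource manager,
//    calls Reclaim() to reset the pool and queue it for Reuse().
pool.reset();
recycler->Dispose();
```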