Flutter Engine
 
Loading...
Searching...
No Matches
command_pool_vk.cc
Go to the documentation of this file.
1// Copyright 2013 The Flutter Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <memory>
8#include <optional>
9#include <utility>
10
13
14#include "impeller/renderer/backend/vulkan/vk.h" // IWYU pragma: keep.
15#include "vulkan/vulkan_enums.hpp"
16#include "vulkan/vulkan_handles.hpp"
17#include "vulkan/vulkan_structs.hpp"
18
19namespace impeller {
20
21// Holds the command pool in a background thread, recycling it when not in use.
23 public:
25
26 // The recycler also recycles command buffers that were never used, up to a
27 // limit of 16 per frame. This number was somewhat arbitrarily chosen.
28 static constexpr size_t kUnusedCommandBufferLimit = 16u;
29
 // NOTE(review): the constructor's name line was lost in this extraction;
 // these are its parameters: the pool, the command buffers allocated from it,
 // how many of those buffers went unused, and the recycler that will reclaim
 // the pool when this holder is destroyed.
31 vk::UniqueCommandPool&& pool,
32 std::vector<vk::UniqueCommandBuffer>&& buffers,
33 size_t unused_count,
34 std::weak_ptr<CommandPoolRecyclerVK> recycler)
35 : pool_(std::move(pool)),
36 buffers_(std::move(buffers)),
37 unused_count_(unused_count),
38 recycler_(std::move(recycler)) {}
39
 // NOTE(review): the destructor's signature line was lost in this extraction;
 // the body below runs when the background holder is dropped and hands the
 // pool back to the recycler (if the recycler still exists).
41 auto const recycler = recycler_.lock();
42
43 // Not only does this prevent recycling when the context is being destroyed,
44 // but it also prevents the destructor from effectively being called twice;
45 // once for the original BackgroundCommandPoolVK() and once for the moved
46 // BackgroundCommandPoolVK().
47 if (!recycler) {
48 return;
49 }
50 // If there are many unused command buffers, release some of them and
51 // trim the command pool.
52 bool should_trim = unused_count_ > kUnusedCommandBufferLimit;
53 recycler->Reclaim(std::move(pool_), std::move(buffers_),
54 /*should_trim=*/should_trim);
55 }
56
57 private:
59
60 BackgroundCommandPoolVK& operator=(const BackgroundCommandPoolVK&) = delete;
61
62 vk::UniqueCommandPool pool_;
63
64 // These are retained because the destructor of the C++ UniqueCommandBuffer
65 // wrapper type will attempt to reset the cmd buffer, and doing so may be a
66 // thread safety violation as this may happen on the fence waiter thread.
67 std::vector<vk::UniqueCommandBuffer> buffers_;
68 const size_t unused_count_;
69 std::weak_ptr<CommandPoolRecyclerVK> recycler_;
70};
71
 // NOTE(review): this function's signature line was lost in this extraction.
 // Judging by the body, it tears down this thread's pool by packaging it into
 // a BackgroundCommandPoolVK kept alive by the resource manager (presumably
 // CommandPoolVK's destructor — confirm against the header).
73 if (!pool_) {
74 return;
75 }
76
77 auto const context = context_.lock();
78 if (!context) {
79 return;
80 }
81 auto const recycler = context->GetCommandPoolRecycler();
82 if (!recycler) {
83 return;
84 }
85 // Any unused command buffers are added to the set of used command buffers.
86 // Both will be reset to the initial state when the pool is reset.
87 size_t unused_count = unused_command_buffers_.size();
88 for (auto i = 0u; i < unused_command_buffers_.size(); i++) {
89 collected_buffers_.push_back(std::move(unused_command_buffers_[i]));
90 }
91 unused_command_buffers_.clear();
92
93 auto reset_pool_when_dropped = BackgroundCommandPoolVK(
94 std::move(pool_), std::move(collected_buffers_), unused_count, recycler);
95
 // NOTE(review): the head of the next statement was lost in this extraction;
 // it appears to wrap reset_pool_when_dropped in a resource owned by the
 // context's resource manager so the pool is reset on a background thread.
97 context->GetResourceManager(), std::move(reset_pool_when_dropped));
98}
99
100// TODO(matanlurey): Return a status_or<> instead of {} when we have one.
101vk::UniqueCommandBuffer CommandPoolVK::CreateCommandBuffer() {
102 auto const context = context_.lock();
103 if (!context) {
104 return {};
105 }
106
107 Lock lock(pool_mutex_);
108 if (!pool_) {
109 return {};
110 }
111 if (!unused_command_buffers_.empty()) {
112 vk::UniqueCommandBuffer buffer = std::move(unused_command_buffers_.back());
113 unused_command_buffers_.pop_back();
114 return buffer;
115 }
116
117 auto const device = context->GetDevice();
118 vk::CommandBufferAllocateInfo info;
119 info.setCommandPool(pool_.get());
120 info.setCommandBufferCount(1u);
121 info.setLevel(vk::CommandBufferLevel::ePrimary);
122 auto [result, buffers] = device.allocateCommandBuffersUnique(info);
123 if (result != vk::Result::eSuccess) {
124 return {};
125 }
126 return std::move(buffers[0]);
127}
128
129void CommandPoolVK::CollectCommandBuffer(vk::UniqueCommandBuffer&& buffer) {
130 Lock lock(pool_mutex_);
131 if (!pool_) {
132 // If the command pool has already been destroyed, then its buffers have
133 // already been freed.
134 buffer.release();
135 return;
136 }
137 collected_buffers_.push_back(std::move(buffer));
138}
139
 // NOTE(review): this function's signature line was lost in this extraction;
 // the body destroys the pool and discards every buffer handle allocated from
 // it (presumably CommandPoolVK::Destroy — confirm against the header).
141 Lock lock(pool_mutex_);
142 pool_.reset();
143
144 // When the command pool is destroyed, all of its command buffers are freed.
145 // Handles allocated from that pool are now invalid and must be discarded.
146 for (auto& buffer : collected_buffers_) {
147 buffer.release();
148 }
149 for (auto& buffer : unused_command_buffers_) {
150 buffer.release();
151 }
152 unused_command_buffers_.clear();
153 collected_buffers_.clear();
154}
155
156// Associates a resource with a thread and context.
 // NOTE(review): the alias's name line was lost in this extraction; per later
 // uses this is `using CommandPoolMap =`, mapping a context hash to that
 // thread's CommandPoolVK.
158 std::unordered_map<uint64_t, std::shared_ptr<CommandPoolVK>>;
159
160// CommandPoolVK Lifecycle:
161// 1. End of frame will reset the command pool (clearing this on a thread).
162// There will still be references to the command pool from the uncompleted
163// command buffers.
164// 2. The last reference to the command pool will be released from the fence
165// waiter thread, which will schedule a task on the resource
166// manager thread, which in turn will reset the command pool and make it
167// available for reuse ("recycle").
168static thread_local std::unique_ptr<CommandPoolMap> tls_command_pool_map;
169
170// Map each context to a list of all thread-local command pools associated
171// with that context.
173static std::unordered_map<
174 uint64_t,
175 std::unordered_map<std::thread::id, std::weak_ptr<CommandPoolVK>>>
 // NOTE(review): the variable name line was lost in this extraction;
 // per later uses this declares `g_all_pools_map`, guarded by
 // g_all_pools_map_mutex.
177
 // NOTE(review): the constructor's name line was lost in this extraction;
 // per the member initializers this is CommandPoolRecyclerVK's constructor,
 // which caches the context and its hash for map lookups.
179 const std::shared_ptr<ContextVK>& context)
180 : context_(context), context_hash_(context->GetHash()) {}
181
182// Visible for testing.
183// Returns the number of pools in g_all_pools_map for the given context.
 // NOTE(review): the signature line was lost in this extraction; per the
 // declarations this is GetGlobalPoolCount(const ContextVK& context).
185 Lock all_pools_lock(g_all_pools_map_mutex);
186 auto it = g_all_pools_map.find(context.GetHash());
187 return it != g_all_pools_map.end() ? it->second.size() : 0;
188}
189
190// TODO(matanlurey): Return a status_or<> instead of nullptr when we have one.
191std::shared_ptr<CommandPoolVK> CommandPoolRecyclerVK::Get() {
192 auto const strong_context = context_.lock();
193 if (!strong_context) {
194 return nullptr;
195 }
196
197 // If there is a resource in use for this thread and context, return it.
198 if (!tls_command_pool_map.get()) {
 // NOTE(review): the statement inside this branch was lost in this
 // extraction; it presumably allocates the thread-local CommandPoolMap,
 // since the dereference below requires it to be non-null.
200 }
201 CommandPoolMap& pool_map = *tls_command_pool_map.get();
202 auto const it = pool_map.find(context_hash_);
203 if (it != pool_map.end()) {
204 return it->second;
205 }
206
207 // Otherwise, create a new resource and return it.
208 auto data = Create();
209 if (!data || !data->pool) {
210 return nullptr;
211 }
212
213 auto const resource = std::make_shared<CommandPoolVK>(
214 std::move(data->pool), std::move(data->buffers), context_);
215 pool_map.emplace(context_hash_, resource);
216
 // Also register the pool in the global map so the context can destroy it
 // from another thread (see DestroyThreadLocalPools).
217 {
218 Lock all_pools_lock(g_all_pools_map_mutex);
219 g_all_pools_map[context_hash_][std::this_thread::get_id()] = resource;
220 }
221
222 return resource;
223}
224
225// TODO(matanlurey): Return a status_or<> instead of nullopt when we have one.
226std::optional<CommandPoolRecyclerVK::RecycledData>
227CommandPoolRecyclerVK::Create() {
228 // If we can reuse a command pool and its buffers, do so.
229 if (auto data = Reuse()) {
230 return data;
231 }
232
233 // Otherwise, create a new one.
234 auto context = context_.lock();
235 if (!context) {
236 return std::nullopt;
237 }
238 vk::CommandPoolCreateInfo info;
239 info.setQueueFamilyIndex(context->GetGraphicsQueue()->GetIndex().family);
240 info.setFlags(vk::CommandPoolCreateFlagBits::eTransient);
241
242 auto device = context->GetDevice();
243 auto [result, pool] = device.createCommandPoolUnique(info);
244 if (result != vk::Result::eSuccess) {
245 return std::nullopt;
246 }
247 return CommandPoolRecyclerVK::RecycledData{.pool = std::move(pool),
248 .buffers = {}};
249}
250
251std::optional<CommandPoolRecyclerVK::RecycledData>
252CommandPoolRecyclerVK::Reuse() {
253 // If there are no recycled pools, return nullopt.
254 Lock recycled_lock(recycled_mutex_);
255 if (recycled_.empty()) {
256 return std::nullopt;
257 }
258
259 // Otherwise, remove and return a recycled pool.
260 auto data = std::move(recycled_.back());
261 recycled_.pop_back();
262 return std::move(data);
263}
264
 // NOTE(review): the function's name line was lost in this extraction; per
 // the declarations this is CommandPoolRecyclerVK::Reclaim, which resets a
 // retired pool and queues it for reuse. Runs on a background thread.
266 vk::UniqueCommandPool&& pool,
267 std::vector<vk::UniqueCommandBuffer>&& buffers,
268 bool should_trim) {
269 // Reset the pool on a background thread.
270 auto strong_context = context_.lock();
271 if (!strong_context) {
272 return;
273 }
274 auto device = strong_context->GetDevice();
275 vk::CommandPoolResetFlags flags;
276 if (should_trim) {
 // Trimming drops the never-used buffers and asks the driver to return
 // the pool's resources to the system on reset.
277 buffers.clear();
278 flags = vk::CommandPoolResetFlagBits::eReleaseResources;
279 }
280 const auto result = device.resetCommandPool(pool.get(), flags);
281 if (result != vk::Result::eSuccess) {
282 VALIDATION_LOG << "Could not reset command pool: " << vk::to_string(result);
283 }
284
285 // Move the pool to the recycled list.
286 Lock recycled_lock(recycled_mutex_);
287 recycled_.push_back(
288 RecycledData{.pool = std::move(pool), .buffers = std::move(buffers)});
289}
290
 // NOTE(review): this function's signature line was lost in this extraction;
 // per the declarations this is CommandPoolRecyclerVK::Dispose, which clears
 // this context's entry from the calling thread's pool map and from the
 // global registry.
292 CommandPoolMap* pool_map = tls_command_pool_map.get();
293 if (pool_map) {
294 pool_map->erase(context_hash_);
295 }
296
297 {
298 Lock all_pools_lock(g_all_pools_map_mutex);
299 auto found = g_all_pools_map.find(context_hash_);
300 if (found != g_all_pools_map.end()) {
301 found->second.erase(std::this_thread::get_id());
302 }
303 }
304}
305
 // NOTE(review): this function's signature line was lost in this extraction;
 // per the declarations this is CommandPoolRecyclerVK::DestroyThreadLocalPools,
 // which destroys every thread's pool associated with this context.
307 // Delete the context's entry in this thread's command pool map.
308 if (tls_command_pool_map.get()) {
309 tls_command_pool_map.get()->erase(context_hash_);
310 }
311
312 // Destroy all other thread-local CommandPoolVK instances associated with
313 // this context.
314 Lock all_pools_lock(g_all_pools_map_mutex);
315 auto found = g_all_pools_map.find(context_hash_);
316 if (found != g_all_pools_map.end()) {
317 for (auto& [thread_id, weak_pool] : found->second) {
318 auto pool = weak_pool.lock();
319 if (!pool) {
320 continue;
321 }
322 // Delete all objects held by this pool. The destroyed pool will still
323 // remain in its thread's TLS map until that thread exits.
324 pool->Destroy();
325 }
326 g_all_pools_map.erase(found);
327 }
328}
329
330} // namespace impeller
BackgroundCommandPoolVK(BackgroundCommandPoolVK &&)=default
BackgroundCommandPoolVK(vk::UniqueCommandPool &&pool, std::vector< vk::UniqueCommandBuffer > &&buffers, size_t unused_count, std::weak_ptr< CommandPoolRecyclerVK > recycler)
static constexpr size_t kUnusedCommandBufferLimit
CommandPoolRecyclerVK(const std::shared_ptr< ContextVK > &context)
Creates a recycler for the given |ContextVK|.
void Dispose()
Clears this context's thread-local command pool.
static int GetGlobalPoolCount(const ContextVK &context)
std::shared_ptr< CommandPoolVK > Get()
Gets a command pool for the current thread.
void DestroyThreadLocalPools()
Clean up resources held by all per-thread command pools associated with the context.
void Reclaim(vk::UniqueCommandPool &&pool, std::vector< vk::UniqueCommandBuffer > &&buffers, bool should_trim=false)
Returns a command pool to be reset on a background thread.
void CollectCommandBuffer(vk::UniqueCommandBuffer &&buffer)
Collects the given |vk::CommandBuffer| to be retained.
void Destroy()
Delete all Vulkan objects in this command pool.
vk::UniqueCommandBuffer CreateCommandBuffer()
Creates and returns a new |vk::CommandBuffer|.
uint64_t GetHash() const
Definition context_vk.h:104
A unique handle to a resource which will be reclaimed by the specified resource manager.
VkDevice device
Definition main.cc:69
static Mutex g_all_pools_map_mutex
static thread_local std::unique_ptr< CommandPoolMap > tls_command_pool_map
std::unordered_map< uint64_t, std::shared_ptr< CommandPoolVK > > CommandPoolMap
Definition ref_ptr.h:261
A unique command pool and zero or more recycled command buffers.
std::shared_ptr< const fml::Mapping > data
#define IPLR_GUARDED_BY(x)
#define VALIDATION_LOG
Definition validation.h:91