command_pool_vk.cc
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "impeller/renderer/backend/vulkan/command_pool_vk.h"

#include <memory>
#include <optional>
#include <utility>

#include "impeller/base/thread.h"
#include "impeller/renderer/backend/vulkan/context_vk.h"
#include "impeller/renderer/backend/vulkan/resource_manager_vk.h"
#include "impeller/renderer/backend/vulkan/vk.h"  // IWYU pragma: keep.
#include "vulkan/vulkan_handles.hpp"
#include "vulkan/vulkan_structs.hpp"

namespace impeller {

// Holds the command pool in a background thread, recycling it when not in use.
class BackgroundCommandPoolVK final {
 public:
  BackgroundCommandPoolVK(BackgroundCommandPoolVK&&) = default;

  // The recycler also recycles command buffers that were never used, up to a
  // limit of 16 per frame. This number was somewhat arbitrarily chosen.
  static constexpr size_t kUnusedCommandBufferLimit = 16u;

  BackgroundCommandPoolVK(
      vk::UniqueCommandPool&& pool,
      std::vector<vk::UniqueCommandBuffer>&& buffers,
      size_t unused_count,
      std::weak_ptr<CommandPoolRecyclerVK> recycler)
      : pool_(std::move(pool)),
        buffers_(std::move(buffers)),
        unused_count_(unused_count),
        recycler_(std::move(recycler)) {}

  ~BackgroundCommandPoolVK() {
    auto const recycler = recycler_.lock();

    // Not only does this prevent recycling when the context is being
    // destroyed, but it also prevents the destructor from effectively being
    // called twice; once for the original BackgroundCommandPoolVK() and once
    // for the moved BackgroundCommandPoolVK().
    if (!recycler) {
      return;
    }
    // If there are many unused command buffers, release some of them.
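    // Since ~CommandPoolVK() appends the never-used buffers after the used
    // ones, popping |unused_count_| entries from the back drops exactly the
    // never-used buffers. For example, if 20 recycled buffers were carried
    // over and none were handed out this frame, the count exceeds the limit
    // of 16, all 20 are dropped, and only the pool itself is reclaimed.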
    if (unused_count_ > kUnusedCommandBufferLimit) {
      for (auto i = 0u; i < unused_count_; i++) {
        buffers_.pop_back();
      }
    }

    recycler->Reclaim(std::move(pool_), std::move(buffers_));
  }

 private:
  BackgroundCommandPoolVK(const BackgroundCommandPoolVK&) = delete;

  BackgroundCommandPoolVK& operator=(const BackgroundCommandPoolVK&) = delete;

  vk::UniqueCommandPool pool_;

  // These are retained because the destructor of the C++ UniqueCommandBuffer
  // wrapper type will attempt to reset the cmd buffer, and doing so may be a
  // thread safety violation as this may happen on the fence waiter thread.
  std::vector<vk::UniqueCommandBuffer> buffers_;
  const size_t unused_count_;
  std::weak_ptr<CommandPoolRecyclerVK> recycler_;
};

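// Hands the pool and all retained command buffers to a
// BackgroundCommandPoolVK held by a UniqueResourceVKT, so that the pool is
// reset on the resource manager thread rather than on whichever thread drops
// the last reference to this CommandPoolVK (typically the fence waiter).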
CommandPoolVK::~CommandPoolVK() {
  if (!pool_) {
    return;
  }

  auto const context = context_.lock();
  if (!context) {
    return;
  }
  auto const recycler = context->GetCommandPoolRecycler();
  if (!recycler) {
    return;
  }
  // Any unused command buffers are added to the set of used command buffers;
  // both will be reset to the initial state when the pool is reset.
  size_t unused_count = unused_command_buffers_.size();
  for (auto i = 0u; i < unused_command_buffers_.size(); i++) {
    collected_buffers_.push_back(std::move(unused_command_buffers_[i]));
  }
  unused_command_buffers_.clear();

  auto reset_pool_when_dropped = BackgroundCommandPoolVK(
      std::move(pool_), std::move(collected_buffers_), unused_count, recycler);

  UniqueResourceVKT<BackgroundCommandPoolVK> pool(
      context->GetResourceManager(), std::move(reset_pool_when_dropped));
}

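// Creates and returns a new |vk::CommandBuffer|, preferring a recycled,
// never-used buffer over a fresh allocation from the pool.
//
// Rough usage sketch (illustrative only; |context| and the recording step are
// assumptions, not part of this file):
//
//   auto pool = context->GetCommandPoolRecycler()->Get();
//   vk::UniqueCommandBuffer cmd = pool->CreateCommandBuffer();
//   // ... record commands into cmd.get() ...
//   pool->CollectCommandBuffer(std::move(cmd));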
// TODO(matanlurey): Return a status_or<> instead of {} when we have one.
vk::UniqueCommandBuffer CommandPoolVK::CreateCommandBuffer() {
  auto const context = context_.lock();
  if (!context) {
    return {};
  }

  Lock lock(pool_mutex_);
  if (!pool_) {
    return {};
  }
  if (!unused_command_buffers_.empty()) {
    vk::UniqueCommandBuffer buffer = std::move(unused_command_buffers_.back());
    unused_command_buffers_.pop_back();
    return buffer;
  }

  auto const device = context->GetDevice();
  vk::CommandBufferAllocateInfo info;
  info.setCommandPool(pool_.get());
  info.setCommandBufferCount(1u);
  info.setLevel(vk::CommandBufferLevel::ePrimary);
  auto [result, buffers] = device.allocateCommandBuffersUnique(info);
  if (result != vk::Result::eSuccess) {
    return {};
  }
  return std::move(buffers[0]);
}

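// Collects the given |vk::CommandBuffer| so it is retained until the pool
// itself is reset or destroyed, rather than being reset individually.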
void CommandPoolVK::CollectCommandBuffer(vk::UniqueCommandBuffer&& buffer) {
  Lock lock(pool_mutex_);
  if (!pool_) {
    // If the command pool has already been destroyed, then its buffers have
    // already been freed.
    buffer.release();
    return;
  }
  collected_buffers_.push_back(std::move(buffer));
}

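// Deletes all Vulkan objects held by this command pool. Called on every
// per-thread pool associated with a context when that context is destroyed
// (see DestroyThreadLocalPools below).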
void CommandPoolVK::Destroy() {
  Lock lock(pool_mutex_);
  pool_.reset();

  // When the command pool is destroyed, all of its command buffers are freed.
  // Handles allocated from that pool are now invalid and must be discarded.
  for (auto& buffer : collected_buffers_) {
    buffer.release();
  }
  for (auto& buffer : unused_command_buffers_) {
    buffer.release();
  }
  unused_command_buffers_.clear();
  collected_buffers_.clear();
}

// Associates a resource with a thread and context.
using CommandPoolMap =
    std::unordered_map<uint64_t, std::shared_ptr<CommandPoolVK>>;

// CommandPoolVK Lifecycle:
// 1. End of frame will reset the command pool (clearing this on a thread).
//    There will still be references to the command pool from the uncompleted
//    command buffers.
// 2. The last reference to the command pool will be released from the fence
//    waiter thread, which will schedule a task on the resource manager
//    thread, which in turn will reset the command pool and make it available
//    for reuse ("recycle").
static thread_local std::unique_ptr<CommandPoolMap> tls_command_pool_map;

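// A rough sketch of the lifecycle described above (illustrative only; the
// names |recycler|, |pool|, and |cmd| are assumptions, not part of this file):
//
//   auto pool = recycler->Get();                 // per-thread, per-context
//   auto cmd = pool->CreateCommandBuffer();      // reuses an unused buffer
//   pool->CollectCommandBuffer(std::move(cmd));  // retained until reset
//   recycler->Dispose();                         // clears this thread's map
//   // The last shared_ptr (held by in-flight work) is released on the fence
//   // waiter thread; ~CommandPoolVK() then defers the reset to the resource
//   // manager thread, which calls Reclaim() so the pool can be Reuse()d.
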
// Map each context to a list of all thread-local command pools associated
// with that context.
static Mutex g_all_pools_map_mutex;
static std::unordered_map<
    const ContextVK*,
    std::vector<std::weak_ptr<CommandPoolVK>>> g_all_pools_map
    IPLR_GUARDED_BY(g_all_pools_map_mutex);

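// Gets (or lazily creates) the command pool for the calling thread and this
// recycler's context, registering new pools in |g_all_pools_map| so they can
// be cleaned up when the context is destroyed.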
// TODO(matanlurey): Return a status_or<> instead of nullptr when we have one.
std::shared_ptr<CommandPoolVK> CommandPoolRecyclerVK::Get() {
  auto const strong_context = context_.lock();
  if (!strong_context) {
    return nullptr;
  }

  // If there is a resource in use for this thread and context, return it.
  if (!tls_command_pool_map.get()) {
    tls_command_pool_map.reset(new CommandPoolMap());
  }
  CommandPoolMap& pool_map = *tls_command_pool_map.get();
  auto const hash = strong_context->GetHash();
  auto const it = pool_map.find(hash);
  if (it != pool_map.end()) {
    return it->second;
  }

  // Otherwise, create a new resource and return it.
  auto data = Create();
  if (!data || !data->pool) {
    return nullptr;
  }

  auto const resource = std::make_shared<CommandPoolVK>(
      std::move(data->pool), std::move(data->buffers), context_);
  pool_map.emplace(hash, resource);

  {
    Lock all_pools_lock(g_all_pools_map_mutex);
    g_all_pools_map[strong_context.get()].push_back(resource);
  }

  return resource;
}

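// Produces a pool for Get(): reuses a recycled pool and its command buffers
// when one is available, otherwise creates a new transient command pool for
// the graphics queue family.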
// TODO(matanlurey): Return a status_or<> instead of nullopt when we have one.
std::optional<CommandPoolRecyclerVK::RecycledData>
CommandPoolRecyclerVK::Create() {
  // If we can reuse a command pool and its buffers, do so.
  if (auto data = Reuse()) {
    return data;
  }

  // Otherwise, create a new one.
  auto context = context_.lock();
  if (!context) {
    return std::nullopt;
  }
  vk::CommandPoolCreateInfo info;
  info.setQueueFamilyIndex(context->GetGraphicsQueue()->GetIndex().family);
  info.setFlags(vk::CommandPoolCreateFlagBits::eTransient);

  auto device = context->GetDevice();
  auto [result, pool] = device.createCommandPoolUnique(info);
  if (result != vk::Result::eSuccess) {
    return std::nullopt;
  }
  return CommandPoolRecyclerVK::RecycledData{.pool = std::move(pool),
                                             .buffers = {}};
}

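// Pops the most recently recycled pool (and its buffers) off |recycled_|, if
// any.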
std::optional<CommandPoolRecyclerVK::RecycledData>
CommandPoolRecyclerVK::Reuse() {
  // If there are no recycled pools, return nullopt.
  Lock recycled_lock(recycled_mutex_);
  if (recycled_.empty()) {
    return std::nullopt;
  }

  // Otherwise, remove and return a recycled pool.
  auto data = std::move(recycled_.back());
  recycled_.pop_back();
  return std::move(data);
}

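// Called on the resource manager thread (via ~BackgroundCommandPoolVK) to
// reset a returned pool and make it available for Reuse().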
void CommandPoolRecyclerVK::Reclaim(
    vk::UniqueCommandPool&& pool,
    std::vector<vk::UniqueCommandBuffer>&& buffers) {
  // Reset the pool on a background thread.
  auto strong_context = context_.lock();
  if (!strong_context) {
    return;
  }
  auto device = strong_context->GetDevice();
  device.resetCommandPool(pool.get());

  // Move the pool to the recycled list.
  Lock recycled_lock(recycled_mutex_);
  recycled_.push_back(
      RecycledData{.pool = std::move(pool), .buffers = std::move(buffers)});
}

CommandPoolRecyclerVK::~CommandPoolRecyclerVK() {
  // Ensure all recycled pools are reclaimed before this is destroyed.
  Dispose();
}

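// Clears the calling thread's command pool map so those pools can be handed
// back to the recycler (step 1 of the lifecycle described above).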
void CommandPoolRecyclerVK::Dispose() {
  CommandPoolMap* pool_map = tls_command_pool_map.get();
  if (pool_map) {
    pool_map->clear();
  }
}

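// Cleans up resources held by all per-thread command pools associated with
// the given context.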
void CommandPoolRecyclerVK::DestroyThreadLocalPools(const ContextVK* context) {
  // Delete the context's entry in this thread's command pool map.
  if (tls_command_pool_map.get()) {
    tls_command_pool_map.get()->erase(context->GetHash());
  }

  // Destroy all other thread-local CommandPoolVK instances associated with
  // this context.
  Lock all_pools_lock(g_all_pools_map_mutex);
  auto found = g_all_pools_map.find(context);
  if (found != g_all_pools_map.end()) {
    for (auto& weak_pool : found->second) {
      auto pool = weak_pool.lock();
      if (!pool) {
        continue;
      }
      // Delete all objects held by this pool. The destroyed pool will still
      // remain in its thread's TLS map until that thread exits.
      pool->Destroy();
    }
    g_all_pools_map.erase(found);
  }
}

}  // namespace impeller