pipeline_library_vk.cc

// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "impeller/renderer/backend/vulkan/pipeline_library_vk.h"

#include <chrono>
#include <cstdint>
#include <optional>
#include <sstream>

#include "flutter/fml/container.h"
#include "flutter/fml/trace_event.h"
#include "vulkan/vulkan_core.h"
#include "vulkan/vulkan_enums.hpp"

namespace impeller {

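// The library is valid only when the pipeline (PSO) cache opened successfully
// and a concurrent worker task runner was supplied; asynchronous pipeline
// construction and cache persistence are posted to that runner.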
PipelineLibraryVK::PipelineLibraryVK(
    const std::shared_ptr<DeviceHolderVK>& device_holder,
    std::shared_ptr<const Capabilities> caps,
    fml::UniqueFD cache_directory,
    std::shared_ptr<fml::ConcurrentTaskRunner> worker_task_runner)
    : device_holder_(device_holder),
      pso_cache_(std::make_shared<PipelineCacheVK>(std::move(caps),
                                                   device_holder,
                                                   std::move(cache_directory))),
      worker_task_runner_(std::move(worker_task_runner)) {
  FML_DCHECK(worker_task_runner_);
  if (!pso_cache_->IsValid() || !worker_task_runner_) {
    return;
  }

  is_valid_ = true;
}

PipelineLibraryVK::~PipelineLibraryVK() = default;

// |PipelineLibrary|
bool PipelineLibraryVK::IsValid() const {
  return is_valid_;
}

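// Builds a Vulkan compute pipeline for the given descriptor: a single compute
// shader stage (parameterized by a workgroup-size specialization constant), a
// descriptor set layout derived from the descriptor's bindings, and a pipeline
// layout, all fed through the shared PSO cache.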
std::unique_ptr<ComputePipelineVK> PipelineLibraryVK::CreateComputePipeline(
    const ComputePipelineDescriptor& desc) {
  TRACE_EVENT0("flutter", __FUNCTION__);
  vk::ComputePipelineCreateInfo pipeline_info;

  //----------------------------------------------------------------------------
  /// Shader Stage
  ///
  const auto entrypoint = desc.GetStageEntrypoint();
  if (!entrypoint) {
    VALIDATION_LOG << "Compute shader is missing an entrypoint.";
    return nullptr;
  }

  std::shared_ptr<DeviceHolderVK> strong_device = device_holder_.lock();
  if (!strong_device) {
    return nullptr;
  }
  auto device_properties = strong_device->GetPhysicalDevice().getProperties();
  auto max_wg_size = device_properties.limits.maxComputeWorkGroupSize;

  // Give all compute shaders a specialization constant entry for the
  // workgroup/threadgroup size.
  vk::SpecializationMapEntry specialization_map_entry[1];

  uint32_t workgroup_size_x = max_wg_size[0];
  specialization_map_entry[0].constantID = 0;
  specialization_map_entry[0].offset = 0;
  specialization_map_entry[0].size = sizeof(uint32_t);

  vk::SpecializationInfo specialization_info;
  specialization_info.mapEntryCount = 1;
  specialization_info.pMapEntries = &specialization_map_entry[0];
  specialization_info.dataSize = sizeof(uint32_t);
  specialization_info.pData = &workgroup_size_x;
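  // Shader side (assumed): the compute shader is expected to declare its X
  // workgroup size via specialization constant 0, e.g.
  //
  //   layout(local_size_x_id = 0) in;
  //
  // The single uint32_t payload above is the device's maximum X workgroup
  // size queried earlier.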

  vk::PipelineShaderStageCreateInfo info;
  info.setStage(vk::ShaderStageFlagBits::eCompute);
  info.setPName("main");
  info.setModule(ShaderFunctionVK::Cast(entrypoint.get())->GetModule());
  info.setPSpecializationInfo(&specialization_info);
  pipeline_info.setStage(info);

  //----------------------------------------------------------------------------
  /// Pipeline Layout, a.k.a. the descriptor sets and uniforms.
  ///
  std::vector<vk::DescriptorSetLayoutBinding> desc_bindings;

  for (auto layout : desc.GetDescriptorSetLayouts()) {
    auto vk_desc_layout = ToVKDescriptorSetLayoutBinding(layout);
    desc_bindings.push_back(vk_desc_layout);
  }

  vk::DescriptorSetLayoutCreateInfo descs_layout_info;
  descs_layout_info.setBindings(desc_bindings);

  auto [descs_result, descs_layout] =
      strong_device->GetDevice().createDescriptorSetLayoutUnique(
          descs_layout_info);
  if (descs_result != vk::Result::eSuccess) {
    VALIDATION_LOG << "Unable to create descriptor set layout.";
    return nullptr;
  }

  ContextVK::SetDebugName(strong_device->GetDevice(), descs_layout.get(),
                          "Descriptor Set Layout " + desc.GetLabel());

  //----------------------------------------------------------------------------
  /// Create the pipeline layout.
  ///
  vk::PipelineLayoutCreateInfo pipeline_layout_info;
  pipeline_layout_info.setSetLayouts(descs_layout.get());
  auto pipeline_layout = strong_device->GetDevice().createPipelineLayoutUnique(
      pipeline_layout_info);
  if (pipeline_layout.result != vk::Result::eSuccess) {
    VALIDATION_LOG << "Could not create pipeline layout for pipeline "
                   << desc.GetLabel() << ": "
                   << vk::to_string(pipeline_layout.result);
    return nullptr;
  }
  pipeline_info.setLayout(pipeline_layout.value.get());

  //----------------------------------------------------------------------------
  /// Finally, all done with the setup info. Create the pipeline itself.
  ///
  auto pipeline = pso_cache_->CreatePipeline(pipeline_info);
  if (!pipeline) {
    VALIDATION_LOG << "Could not create compute pipeline: " << desc.GetLabel();
    return nullptr;
  }

  ContextVK::SetDebugName(strong_device->GetDevice(), *pipeline_layout.value,
                          "Pipeline Layout " + desc.GetLabel());
  ContextVK::SetDebugName(strong_device->GetDevice(), *pipeline,
                          "Pipeline " + desc.GetLabel());

  return std::make_unique<ComputePipelineVK>(
      device_holder_,
      weak_from_this(),                  //
      desc,                              //
      std::move(pipeline),               //
      std::move(pipeline_layout.value),  //
      std::move(descs_layout)            //
  );
}

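// Render pipeline futures are cached per descriptor; a cache miss registers a
// promise under pipelines_mutex_ and then builds the PipelineVK either on the
// worker task runner (async) or synchronously on the calling thread.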
// |PipelineLibrary|
PipelineFuture<PipelineDescriptor> PipelineLibraryVK::GetPipeline(
    PipelineDescriptor descriptor,
    bool async) {
  Lock lock(pipelines_mutex_);
  if (auto found = pipelines_.find(descriptor); found != pipelines_.end()) {
    return found->second;
  }

  cache_dirty_ = true;
  if (!IsValid()) {
    return {
        descriptor,
        RealizedFuture<std::shared_ptr<Pipeline<PipelineDescriptor>>>(nullptr)};
  }

  auto promise = std::make_shared<
      NoExceptionPromise<std::shared_ptr<Pipeline<PipelineDescriptor>>>>();
  auto pipeline_future =
      PipelineFuture<PipelineDescriptor>{descriptor, promise->get_future()};
  pipelines_[descriptor] = pipeline_future;

  auto weak_this = weak_from_this();

  auto generation_task = [descriptor, weak_this, promise]() {
    auto thiz = weak_this.lock();
    if (!thiz) {
      promise->set_value(nullptr);
      VALIDATION_LOG << "Pipeline library was collected before the pipeline "
                        "could be created.";
      return;
    }

    promise->set_value(PipelineVK::Create(
        descriptor,                                            //
        PipelineLibraryVK::Cast(*thiz).device_holder_.lock(),  //
        weak_this                                              //
        ));
  };

  if (async) {
    worker_task_runner_->PostTask(generation_task);
  } else {
    generation_task();
  }

  return pipeline_future;
}

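// The compute path mirrors the render path above, except that the pipeline is
// built directly via CreateComputePipeline() rather than PipelineVK::Create().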
// |PipelineLibrary|
PipelineFuture<ComputePipelineDescriptor> PipelineLibraryVK::GetPipeline(
    ComputePipelineDescriptor descriptor,
    bool async) {
  Lock lock(compute_pipelines_mutex_);
  if (auto found = compute_pipelines_.find(descriptor);
      found != compute_pipelines_.end()) {
    return found->second;
  }

  cache_dirty_ = true;
  if (!IsValid()) {
    return {
        descriptor,
        RealizedFuture<std::shared_ptr<Pipeline<ComputePipelineDescriptor>>>(
            nullptr)};
  }

  auto promise = std::make_shared<
      std::promise<std::shared_ptr<Pipeline<ComputePipelineDescriptor>>>>();
  auto pipeline_future = PipelineFuture<ComputePipelineDescriptor>{
      descriptor, promise->get_future()};
  compute_pipelines_[descriptor] = pipeline_future;

  auto weak_this = weak_from_this();

  auto generation_task = [descriptor, weak_this, promise]() {
    auto self = weak_this.lock();
    if (!self) {
      promise->set_value(nullptr);
      VALIDATION_LOG << "Pipeline library was collected before the pipeline "
                        "could be created.";
      return;
    }

    auto pipeline =
        PipelineLibraryVK::Cast(*self).CreateComputePipeline(descriptor);
    if (!pipeline) {
      promise->set_value(nullptr);
      VALIDATION_LOG << "Could not create pipeline: " << descriptor.GetLabel();
      return;
    }

    promise->set_value(std::move(pipeline));
  };

  if (async) {
    worker_task_runner_->PostTask(generation_task);
  } else {
    generation_task();
  }

  return pipeline_future;
}

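// Evicts cached render pipeline futures whose entrypoint for the function's
// shader stage matches the given shader function.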
// |PipelineLibrary|
void PipelineLibraryVK::RemovePipelinesWithEntryPoint(
    std::shared_ptr<const ShaderFunction> function) {
  Lock lock(pipelines_mutex_);

  fml::erase_if(pipelines_, [&](auto item) {
    return item->first.GetEntrypointForStage(function->GetStage())
        ->IsEqual(*function);
  });
}

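// Invoked once per acquired surface frame. Every 50 frames, and only if new
// pipelines were requested since the last write, the Vulkan pipeline cache is
// persisted to disk.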
void PipelineLibraryVK::DidAcquireSurfaceFrame() {
  if (++frames_acquired_ == 50u) {
    if (cache_dirty_) {
      cache_dirty_ = false;
      PersistPipelineCacheToDisk();
    }
    frames_acquired_ = 0;
  }
}

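// The persist task captures only a weak reference to the PSO cache so that a
// task still queued on the worker does not extend the cache's lifetime beyond
// the library's destruction.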
void PipelineLibraryVK::PersistPipelineCacheToDisk() {
  worker_task_runner_->PostTask(
      [weak_cache = decltype(pso_cache_)::weak_type(pso_cache_)]() {
        auto cache = weak_cache.lock();
        if (!cache) {
          return;
        }
        cache->PersistCacheToDisk();
      });
}

const std::shared_ptr<PipelineCacheVK>& PipelineLibraryVK::GetPSOCache() const {
  return pso_cache_;
}

const std::shared_ptr<fml::ConcurrentTaskRunner>&
PipelineLibraryVK::GetWorkerTaskRunner() const {
  return worker_task_runner_;
}

}  // namespace impeller