Flutter Engine Uber Docs
Docs for the entire Flutter Engine repo.
 
Loading...
Searching...
No Matches
context_vk.cc
Go to the documentation of this file.
1// Copyright 2013 The Flutter Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6#include <thread>
7#include <unordered_map>
8
17
18#ifdef FML_OS_ANDROID
19#include <pthread.h>
20#include <sys/resource.h>
21#include <sys/time.h>
22#endif // FML_OS_ANDROID
23
24#include <map>
25#include <memory>
26#include <optional>
27#include <string>
28#include <vector>
29
46
47VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE
48
49namespace impeller {
50
// Whether validation layers are active for this process. Written once in
// ContextVK::Setup() from CapabilitiesVK::AreValidationsEnabled(); read by
// HasValidationLayers().
static bool gHasValidationLayers = false;
52
56
57static std::optional<vk::PhysicalDevice> PickPhysicalDevice(
58 const CapabilitiesVK& caps,
59 const vk::Instance& instance) {
60 for (const auto& device : instance.enumeratePhysicalDevices().value) {
61 if (caps.GetEnabledDeviceFeatures(device).has_value()) {
62 return device;
63 }
64 }
65 return std::nullopt;
66}
67
68static std::vector<vk::DeviceQueueCreateInfo> GetQueueCreateInfos(
69 std::initializer_list<QueueIndexVK> queues) {
70 std::map<size_t /* family */, size_t /* index */> family_index_map;
71 for (const auto& queue : queues) {
72 family_index_map[queue.family] = 0;
73 }
74 for (const auto& queue : queues) {
75 auto value = family_index_map[queue.family];
76 family_index_map[queue.family] = std::max(value, queue.index);
77 }
78
79 static float kQueuePriority = 1.0f;
80 std::vector<vk::DeviceQueueCreateInfo> infos;
81 for (const auto& item : family_index_map) {
82 vk::DeviceQueueCreateInfo info;
83 info.setQueueFamilyIndex(item.first);
84 info.setQueueCount(item.second + 1);
85 info.setQueuePriorities(kQueuePriority);
86 infos.push_back(info);
87 }
88 return infos;
89}
90
91static std::optional<QueueIndexVK> PickQueue(const vk::PhysicalDevice& device,
92 vk::QueueFlagBits flags) {
93 // This can be modified to ensure that dedicated queues are returned for each
94 // queue type depending on support.
95 const auto families = device.getQueueFamilyProperties();
96 for (size_t i = 0u; i < families.size(); i++) {
97 if (!(families[i].queueFlags & flags)) {
98 continue;
99 }
100 return QueueIndexVK{.family = i, .index = 0};
101 }
102 return std::nullopt;
103}
104
105std::shared_ptr<ContextVK> ContextVK::Create(Settings settings) {
106 auto context = std::shared_ptr<ContextVK>(new ContextVK(settings.flags));
107 context->Setup(std::move(settings));
108 if (!context->IsValid()) {
109 return nullptr;
110 }
111 return context;
112}
113
114// static
115size_t ContextVK::ChooseThreadCountForWorkers(size_t hardware_concurrency) {
116 // Never create more than 4 worker threads. Attempt to use up to
117 // half of the available concurrency.
118 return std::clamp(hardware_concurrency / 2ull, /*lo=*/1ull, /*hi=*/4ull);
119}
120
namespace {
// Process-wide counter used to assign each context a unique identity.
std::atomic_uint64_t context_count = 0;
// Returns a unique, monotonically increasing id for each call. Note: despite
// the name, the pointer is intentionally ignored — a counter stays unique
// even if the allocator reuses an address for a later context.
uint64_t CalculateHash([[maybe_unused]] void* ptr) {
  return context_count.fetch_add(1);
}
}  // namespace
127
// Seeds hash_ from a process-wide monotonically increasing counter
// (CalculateHash), giving every context instance a unique identity.
ContextVK::ContextVK(const Flags& flags)
    : Context(flags), hash_(CalculateHash(this)) {}
130
132 if (device_holder_ && device_holder_->device) {
133 [[maybe_unused]] auto result = device_holder_->device->waitIdle();
134 }
135 if (command_pool_recycler_) {
136 command_pool_recycler_->DestroyThreadLocalPools();
137 }
138}
139
143
// The Impeller engine version reported to the driver through
// vk::ApplicationInfo::engineVersion. Encoded as (variant, major, minor,
// patch); currently version 2.0.0.
static constexpr uint32_t kImpellerEngineVersion =
    VK_MAKE_API_VERSION(0, 2, 0, 0);
147
148void ContextVK::Setup(Settings settings) {
149 TRACE_EVENT0("impeller", "ContextVK::Setup");
150
151 if (!settings.proc_address_callback) {
152 VALIDATION_LOG << "Missing proc address callback.";
153 return;
154 }
155
156 raster_message_loop_ = fml::ConcurrentMessageLoop::Create(
157 ChooseThreadCountForWorkers(std::thread::hardware_concurrency()));
158
159 auto& dispatcher = VULKAN_HPP_DEFAULT_DISPATCHER;
160 dispatcher.init(settings.proc_address_callback);
161
162 std::vector<std::string> embedder_instance_extensions;
163 std::vector<std::string> embedder_device_extensions;
164 if (settings.embedder_data.has_value()) {
165 embedder_instance_extensions = settings.embedder_data->instance_extensions;
166 embedder_device_extensions = settings.embedder_data->device_extensions;
167 }
168 auto caps = std::shared_ptr<CapabilitiesVK>(new CapabilitiesVK(
169 settings.enable_validation, //
170 settings.fatal_missing_validations, //
171 /*use_embedder_extensions=*/settings.embedder_data.has_value(), //
172 embedder_instance_extensions, //
173 embedder_device_extensions //
174 ));
175
176 if (!caps->IsValid()) {
177 VALIDATION_LOG << "Could not determine device capabilities.";
178 return;
179 }
180
181 gHasValidationLayers = caps->AreValidationsEnabled();
182
183 auto enabled_layers = caps->GetEnabledLayers();
184 auto enabled_extensions = caps->GetEnabledInstanceExtensions();
185
186 if (!enabled_layers.has_value() || !enabled_extensions.has_value()) {
187 VALIDATION_LOG << "Device has insufficient capabilities.";
188 return;
189 }
190
191 vk::InstanceCreateFlags instance_flags = {};
192
193 if (std::find(enabled_extensions.value().begin(),
194 enabled_extensions.value().end(),
195 "VK_KHR_portability_enumeration") !=
196 enabled_extensions.value().end()) {
197 instance_flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
198 }
199
200 std::vector<const char*> enabled_layers_c;
201 std::vector<const char*> enabled_extensions_c;
202
203 for (const auto& layer : enabled_layers.value()) {
204 enabled_layers_c.push_back(layer.c_str());
205 }
206
207 for (const auto& ext : enabled_extensions.value()) {
208 enabled_extensions_c.push_back(ext.c_str());
209 }
210
211 vk::ApplicationInfo application_info;
212
213 // Use the same encoding macro as vulkan versions, but otherwise engine
214 // version is intended to be the version of the Impeller engine. This version
215 // information, along with the application name below is provided to allow
216 // IHVs to make optimizations and/or disable functionality based on knowledge
217 // of the engine version (for example, to work around bugs). We don't tie this
218 // to the overall Flutter version as that version is not yet defined when the
219 // engine is compiled. Instead we can manually bump it occasionally.
220 //
221 // variant, major, minor, patch
222 application_info.setApplicationVersion(VK_API_VERSION_1_0);
223 application_info.setApiVersion(VK_API_VERSION_1_1);
224 application_info.setEngineVersion(kImpellerEngineVersion);
225 application_info.setPEngineName("Impeller");
226 application_info.setPApplicationName("Impeller");
227
228 vk::StructureChain<vk::InstanceCreateInfo, vk::ValidationFeaturesEXT>
229 instance_chain;
230
231 if (!caps->AreValidationsEnabled()) {
232 instance_chain.unlink<vk::ValidationFeaturesEXT>();
233 }
234
235 std::vector<vk::ValidationFeatureEnableEXT> enabled_validations = {
236 vk::ValidationFeatureEnableEXT::eSynchronizationValidation,
237 };
238
239 auto validation = instance_chain.get<vk::ValidationFeaturesEXT>();
240 validation.setEnabledValidationFeatures(enabled_validations);
241
242 auto instance_info = instance_chain.get<vk::InstanceCreateInfo>();
243 instance_info.setPEnabledLayerNames(enabled_layers_c);
244 instance_info.setPEnabledExtensionNames(enabled_extensions_c);
245 instance_info.setPApplicationInfo(&application_info);
246 instance_info.setFlags(instance_flags);
247
248 auto device_holder = std::make_shared<DeviceHolderImpl>();
249 if (!settings.embedder_data.has_value()) {
250 auto instance = vk::createInstanceUnique(instance_info);
251 if (instance.result != vk::Result::eSuccess) {
252 VALIDATION_LOG << "Could not create Vulkan instance: "
253 << vk::to_string(instance.result);
254 return;
255 }
256 device_holder->instance = std::move(instance.value);
257 } else {
258 device_holder->instance.reset(settings.embedder_data->instance);
259 device_holder->owned = false;
260 }
261 dispatcher.init(device_holder->instance.get());
262
263 //----------------------------------------------------------------------------
264 /// Setup the debug report.
265 ///
266 /// Do this as early as possible since we could use the debug report from
267 /// initialization issues.
268 ///
269 auto debug_report =
270 std::make_unique<DebugReportVK>(*caps, device_holder->instance.get());
271
272 if (!debug_report->IsValid()) {
273 VALIDATION_LOG << "Could not set up debug report.";
274 return;
275 }
276
277 //----------------------------------------------------------------------------
278 /// Pick the physical device.
279 ///
280 if (!settings.embedder_data.has_value()) {
281 auto physical_device =
282 PickPhysicalDevice(*caps, device_holder->instance.get());
283 if (!physical_device.has_value()) {
284 VALIDATION_LOG << "No valid Vulkan device found.";
285 return;
286 }
287 device_holder->physical_device = physical_device.value();
288 } else {
289 device_holder->physical_device = settings.embedder_data->physical_device;
290 }
291
292 //----------------------------------------------------------------------------
293 /// Pick device queues.
294 ///
295 auto graphics_queue =
296 PickQueue(device_holder->physical_device, vk::QueueFlagBits::eGraphics);
297 auto transfer_queue =
298 PickQueue(device_holder->physical_device, vk::QueueFlagBits::eTransfer);
299 auto compute_queue =
300 PickQueue(device_holder->physical_device, vk::QueueFlagBits::eCompute);
301
302 if (!graphics_queue.has_value()) {
303 VALIDATION_LOG << "Could not pick graphics queue.";
304 return;
305 }
306 if (!transfer_queue.has_value()) {
307 transfer_queue = graphics_queue.value();
308 }
309 if (!compute_queue.has_value()) {
310 VALIDATION_LOG << "Could not pick compute queue.";
311 return;
312 }
313
314 //----------------------------------------------------------------------------
315 /// Create the logical device.
316 ///
318 caps->GetEnabledDeviceExtensions(device_holder->physical_device);
319 if (!enabled_device_extensions.has_value()) {
320 // This shouldn't happen since we already did device selection. But
321 // doesn't hurt to check again.
322 return;
323 }
324
325 std::vector<const char*> enabled_device_extensions_c;
326 for (const auto& ext : enabled_device_extensions.value()) {
327 enabled_device_extensions_c.push_back(ext.c_str());
328 }
329
330 const auto queue_create_infos = GetQueueCreateInfos(
331 {graphics_queue.value(), compute_queue.value(), transfer_queue.value()});
332
333 const auto enabled_features =
334 caps->GetEnabledDeviceFeatures(device_holder->physical_device);
335 if (!enabled_features.has_value()) {
336 // This shouldn't happen since the device can't be picked if this was not
337 // true. But doesn't hurt to check.
338 return;
339 }
340
341 vk::DeviceCreateInfo device_info;
342
343 device_info.setPNext(&enabled_features.value().get());
344 device_info.setQueueCreateInfos(queue_create_infos);
345 device_info.setPEnabledExtensionNames(enabled_device_extensions_c);
346 // Device layers are deprecated and ignored.
347
348 if (!settings.embedder_data.has_value()) {
349 auto device_result =
350 device_holder->physical_device.createDeviceUnique(device_info);
351 if (device_result.result != vk::Result::eSuccess) {
352 VALIDATION_LOG << "Could not create logical device.";
353 return;
354 }
355 device_holder->device = std::move(device_result.value);
356 } else {
357 device_holder->device.reset(settings.embedder_data->device);
358 }
359
360 if (!caps->SetPhysicalDevice(device_holder->physical_device,
361 *enabled_features)) {
362 VALIDATION_LOG << "Capabilities could not be updated.";
363 return;
364 }
365
366 //----------------------------------------------------------------------------
367 /// Create the allocator.
368 ///
369 auto allocator = std::shared_ptr<AllocatorVK>(new AllocatorVK(
370 weak_from_this(), //
371 application_info.apiVersion, //
372 device_holder->physical_device, //
373 device_holder, //
374 device_holder->instance.get(), //
375 *caps //
376 ));
377
378 if (!allocator->IsValid()) {
379 VALIDATION_LOG << "Could not create memory allocator.";
380 return;
381 }
382
383 //----------------------------------------------------------------------------
384 /// Setup the pipeline library.
385 ///
386 auto pipeline_library = std::shared_ptr<PipelineLibraryVK>(
387 new PipelineLibraryVK(device_holder, //
388 caps, //
389 std::move(settings.cache_directory), //
390 raster_message_loop_->GetTaskRunner() //
391 ));
392
393 if (!pipeline_library->IsValid()) {
394 VALIDATION_LOG << "Could not create pipeline library.";
395 return;
396 }
397
398 auto sampler_library =
399 std::shared_ptr<SamplerLibraryVK>(new SamplerLibraryVK(device_holder));
400
401 auto shader_library = std::shared_ptr<ShaderLibraryVK>(
402 new ShaderLibraryVK(device_holder, //
403 settings.shader_libraries_data) //
404 );
405
406 if (!shader_library->IsValid()) {
407 VALIDATION_LOG << "Could not create shader library.";
408 return;
409 }
410
411 //----------------------------------------------------------------------------
412 /// Create the fence waiter.
413 ///
414 auto fence_waiter =
415 std::shared_ptr<FenceWaiterVK>(new FenceWaiterVK(device_holder));
416
417 //----------------------------------------------------------------------------
418 /// Create the resource manager and command pool recycler.
419 ///
420 auto resource_manager = ResourceManagerVK::Create();
421 if (!resource_manager) {
422 VALIDATION_LOG << "Could not create resource manager.";
423 return;
424 }
425
426 auto command_pool_recycler =
427 std::make_shared<CommandPoolRecyclerVK>(shared_from_this());
428 if (!command_pool_recycler) {
429 VALIDATION_LOG << "Could not create command pool recycler.";
430 return;
431 }
432
433 auto descriptor_pool_recycler =
434 std::make_shared<DescriptorPoolRecyclerVK>(weak_from_this());
435 if (!descriptor_pool_recycler) {
436 VALIDATION_LOG << "Could not create descriptor pool recycler.";
437 return;
438 }
439
440 //----------------------------------------------------------------------------
441 /// Fetch the queues.
442 ///
443 QueuesVK queues;
444 if (!settings.embedder_data.has_value()) {
445 queues = QueuesVK::FromQueueIndices(device_holder->device.get(), //
446 graphics_queue.value(), //
447 compute_queue.value(), //
448 transfer_queue.value() //
449 );
450 } else {
451 queues =
452 QueuesVK::FromEmbedderQueue(settings.embedder_data->queue,
453 settings.embedder_data->queue_family_index);
454 }
455 if (!queues.IsValid()) {
456 VALIDATION_LOG << "Could not fetch device queues.";
457 return;
458 }
459
460 VkPhysicalDeviceProperties physical_device_properties;
461 dispatcher.vkGetPhysicalDeviceProperties(device_holder->physical_device,
462 &physical_device_properties);
463
464 //----------------------------------------------------------------------------
465 /// All done!
466 ///
467
468 // Apply workarounds for broken drivers.
469 auto driver_info =
470 std::make_unique<DriverInfoVK>(device_holder->physical_device);
471 workarounds_ = GetWorkaroundsFromDriverInfo(*driver_info);
472 caps->ApplyWorkarounds(workarounds_);
473 sampler_library->ApplyWorkarounds(workarounds_);
474
475 device_holder_ = std::move(device_holder);
476 idle_waiter_vk_ = std::make_shared<IdleWaiterVK>(device_holder_);
477 driver_info_ = std::move(driver_info);
478 debug_report_ = std::move(debug_report);
479 allocator_ = std::move(allocator);
480 shader_library_ = std::move(shader_library);
481 sampler_library_ = std::move(sampler_library);
482 pipeline_library_ = std::move(pipeline_library);
483 yuv_conversion_library_ = std::shared_ptr<YUVConversionLibraryVK>(
484 new YUVConversionLibraryVK(device_holder_));
485 queues_ = std::move(queues);
486 device_capabilities_ = std::move(caps);
487 fence_waiter_ = std::move(fence_waiter);
488 resource_manager_ = std::move(resource_manager);
489 command_pool_recycler_ = std::move(command_pool_recycler);
490 descriptor_pool_recycler_ = std::move(descriptor_pool_recycler);
491 device_name_ = std::string(physical_device_properties.deviceName);
492 command_queue_vk_ = std::make_shared<CommandQueueVK>(weak_from_this());
493 should_enable_surface_control_ = settings.enable_surface_control;
494 should_batch_cmd_buffers_ = !workarounds_.batch_submit_command_buffer_timeout;
495 is_valid_ = true;
496
497 // Create the GPU Tracer later because it depends on state from
498 // the ContextVK.
499 gpu_tracer_ = std::make_shared<GPUTracerVK>(weak_from_this(),
500 settings.enable_gpu_tracing);
501 gpu_tracer_->InitializeQueryPool(*this);
502
503 //----------------------------------------------------------------------------
504 /// Label all the relevant objects. This happens after setup so that the
505 /// debug messengers have had a chance to be set up.
506 ///
507 SetDebugName(GetDevice(), device_holder_->device.get(), "ImpellerDevice");
508}
509
511 CapabilitiesVK::Cast(*device_capabilities_).SetOffscreenFormat(pixel_format);
512}
513
// |Context|
std::string ContextVK::DescribeGpuModel() const {
  // Device name captured from VkPhysicalDeviceProperties during Setup().
  return device_name_;
}
518
// |Context|
// True only after Setup() completed every initialization step.
bool ContextVK::IsValid() const {
  return is_valid_;
}
522
// |Context|
// Returns the AllocatorVK created during Setup().
std::shared_ptr<Allocator> ContextVK::GetResourceAllocator() const {
  return allocator_;
}
526
// |Context|
// Returns the ShaderLibraryVK created during Setup().
std::shared_ptr<ShaderLibrary> ContextVK::GetShaderLibrary() const {
  return shader_library_;
}
530
// |Context|
// Returns the SamplerLibraryVK created during Setup().
std::shared_ptr<SamplerLibrary> ContextVK::GetSamplerLibrary() const {
  return sampler_library_;
}
534
// |Context|
// Returns the PipelineLibraryVK created during Setup().
std::shared_ptr<PipelineLibrary> ContextVK::GetPipelineLibrary() const {
  return pipeline_library_;
}
538
// |Context|
// Creates and begins a one-time-submit command buffer backed by this thread's
// recycled command pool and a per-thread cached descriptor pool. Returns
// nullptr when a pool, the graphics queue, or vkBeginCommandBuffer is
// unavailable/fails.
std::shared_ptr<CommandBuffer> ContextVK::CreateCommandBuffer() const {
  const auto& recycler = GetCommandPoolRecycler();
  // Thread-local command pool; may be null if the recycler cannot vend one.
  auto tls_pool = recycler->Get();
  if (!tls_pool) {
    return nullptr;
  }

  // look up a cached descriptor pool for the current frame and reuse it
  // if it exists, otherwise create a new pool.
  std::shared_ptr<DescriptorPoolVK> descriptor_pool;
  {
    // cached_descriptor_pool_ is keyed by thread id and shared across
    // threads, so the lookup/insert is guarded by desc_pool_mutex_.
    Lock lock(desc_pool_mutex_);
    DescriptorPoolMap::iterator current_pool =
        cached_descriptor_pool_.find(std::this_thread::get_id());
    if (current_pool == cached_descriptor_pool_.end()) {
      descriptor_pool = (cached_descriptor_pool_[std::this_thread::get_id()] =
                             descriptor_pool_recycler_->GetDescriptorPool());
    } else {
      descriptor_pool = current_pool->second;
    }
  }

  // Bundles the pools and a GPU trace probe so they live as long as the
  // command buffer that uses them.
  auto tracked_objects = std::make_shared<TrackedObjectsVK>(
      weak_from_this(), std::move(tls_pool), std::move(descriptor_pool),
      GetGPUTracer()->CreateGPUProbe());
  auto queue = GetGraphicsQueue();

  if (!tracked_objects || !tracked_objects->IsValid() || !queue) {
    return nullptr;
  }

  // Impeller command buffers are recorded once and discarded after submit.
  vk::CommandBufferBeginInfo begin_info;
  begin_info.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit;
  if (tracked_objects->GetCommandBuffer().begin(begin_info) !=
      vk::Result::eSuccess) {
    VALIDATION_LOG << "Could not begin command buffer.";
    return nullptr;
  }

  // Begin GPU timing for this buffer as soon as recording starts.
  tracked_objects->GetGPUProbe().RecordCmdBufferStart(
      tracked_objects->GetCommandBuffer());

  return std::shared_ptr<CommandBufferVK>(new CommandBufferVK(
      shared_from_this(),         //
      GetDeviceHolder(),          //
      std::move(tracked_objects)  //
      ));
}
587
// Returns the Vulkan instance (owned or embedder-provided; see Setup()).
vk::Instance ContextVK::GetInstance() const {
  return *device_holder_->instance;
}
591
// Returns the logical device created (or adopted) during Setup().
const vk::Device& ContextVK::GetDevice() const {
  return device_holder_->device.get();
}
595
596const std::shared_ptr<fml::ConcurrentTaskRunner>
598 return raster_message_loop_->GetTaskRunner();
599}
600
602 // There are multiple objects, for example |CommandPoolVK|, that in their
603 // destructors make a strong reference to |ContextVK|. Resetting these shared
604 // pointers ensures that cleanup happens in a correct order.
605 //
606 // tl;dr: Without it, we get thread::join failures on shutdown.
607 fence_waiter_->Terminate();
608 resource_manager_.reset();
609
610 raster_message_loop_->Terminate();
611}
612
// Creates a per-surface context that shares this context's device state.
std::shared_ptr<SurfaceContextVK> ContextVK::CreateSurfaceContext() {
  return std::make_shared<SurfaceContextVK>(shared_from_this());
}
616
// |Context|
// Returns the CapabilitiesVK populated (and workaround-adjusted) in Setup().
const std::shared_ptr<const Capabilities>& ContextVK::GetCapabilities() const {
  return device_capabilities_;
}
620
// Returns the graphics queue selected during Setup().
const std::shared_ptr<QueueVK>& ContextVK::GetGraphicsQueue() const {
  return queues_.graphics_queue;
}
624
// Returns the physical device picked (or adopted) during Setup().
vk::PhysicalDevice ContextVK::GetPhysicalDevice() const {
  return device_holder_->physical_device;
}
628
// Returns the fence waiter created during Setup().
std::shared_ptr<FenceWaiterVK> ContextVK::GetFenceWaiter() const {
  return fence_waiter_;
}
632
// Returns the resource manager created during Setup().
std::shared_ptr<ResourceManagerVK> ContextVK::GetResourceManager() const {
  return resource_manager_;
}
636
// Returns the recycler that vends thread-local command pools; used by
// CreateCommandBuffer().
std::shared_ptr<CommandPoolRecyclerVK> ContextVK::GetCommandPoolRecycler()
    const {
  return command_pool_recycler_;
}
641
// Returns the GPU tracer created at the end of Setup().
std::shared_ptr<GPUTracerVK> ContextVK::GetGPUTracer() const {
  return gpu_tracer_;
}
645
// Returns the recycler backing the per-thread descriptor pool cache in
// CreateCommandBuffer().
std::shared_ptr<DescriptorPoolRecyclerVK> ContextVK::GetDescriptorPoolRecycler()
    const {
  return descriptor_pool_recycler_;
}
650
// |Context|
// Returns the queue used to submit command buffers.
std::shared_ptr<CommandQueue> ContextVK::GetCommandQueue() const {
  return command_queue_vk_;
}
654
656 std::shared_ptr<CommandBuffer> command_buffer) {
657 if (should_batch_cmd_buffers_) {
658 pending_command_buffers_.push_back(std::move(command_buffer));
659 return true;
660 } else {
661 return GetCommandQueue()->Submit({command_buffer}).ok();
662 }
663}
664
666 if (pending_command_buffers_.empty()) {
667 return true;
668 }
669
670 if (should_batch_cmd_buffers_) {
671 bool result = GetCommandQueue()->Submit(pending_command_buffers_).ok();
672 pending_command_buffers_.clear();
673 return result;
674 } else {
675 return true;
676 }
677}
678
679// Creating a render pass is observed to take an additional 6ms on a Pixel 7
680// device as the driver will lazily bootstrap and compile shaders to do so.
681// The render pass does not need to be begun or executed.
684 RenderTarget render_target =
685 rt_allocator.CreateOffscreenMSAA(*this, {1, 1}, 1);
686
687 RenderPassBuilderVK builder;
688
689 render_target.IterateAllColorAttachments(
690 [&builder](size_t index, const ColorAttachment& attachment) -> bool {
691 builder.SetColorAttachment(
692 index, //
693 attachment.texture->GetTextureDescriptor().format, //
694 attachment.texture->GetTextureDescriptor().sample_count, //
695 attachment.load_action, //
696 attachment.store_action //
697 );
698 return true;
699 });
700
701 if (const auto& depth = render_target.GetDepthAttachment();
702 depth.has_value()) {
704 depth->texture->GetTextureDescriptor().format, //
705 depth->texture->GetTextureDescriptor().sample_count, //
706 depth->load_action, //
707 depth->store_action //
708 );
709 } else if (const auto& stencil = render_target.GetStencilAttachment();
710 stencil.has_value()) {
711 builder.SetStencilAttachment(
712 stencil->texture->GetTextureDescriptor().format, //
713 stencil->texture->GetTextureDescriptor().sample_count, //
714 stencil->load_action, //
715 stencil->store_action //
716 );
717 }
718
719 auto pass = builder.Build(GetDevice());
720}
721
723 {
724 Lock lock(desc_pool_mutex_);
725 cached_descriptor_pool_.erase(std::this_thread::get_id());
726 }
727 command_pool_recycler_->Dispose();
728}
729
730const std::shared_ptr<YUVConversionLibraryVK>&
732 return yuv_conversion_library_;
733}
734
// Returns driver metadata gathered during Setup(); the source of the active
// workarounds_.
const std::unique_ptr<DriverInfoVK>& ContextVK::GetDriverInfo() const {
  return driver_info_;
}
738
740 return should_enable_surface_control_ &&
741 CapabilitiesVK::Cast(*device_capabilities_)
743}
744
748
// |Context|
// Onscreen submissions follow the same batching policy as any other command
// buffer; see EnqueueCommandBuffer.
bool ContextVK::SubmitOnscreen(std::shared_ptr<CommandBuffer> cmd_buffer) {
  return EnqueueCommandBuffer(std::move(cmd_buffer));
}
752
754 return workarounds_;
755}
756
757} // namespace impeller
static std::shared_ptr< ConcurrentMessageLoop > Create(size_t worker_count=std::thread::hardware_concurrency())
static CapabilitiesVK & Cast(Capabilities &base)
The Vulkan layers and extensions wrangler.
bool SupportsExternalSemaphoreExtensions() const
void SetOffscreenFormat(PixelFormat pixel_format) const
std::optional< PhysicalDeviceFeatures > GetEnabledDeviceFeatures(const vk::PhysicalDevice &physical_device) const
void SetOffscreenFormat(PixelFormat pixel_format)
std::shared_ptr< Allocator > GetResourceAllocator() const override
Returns the allocator used to create textures and buffers on the device.
std::shared_ptr< ResourceManagerVK > GetResourceManager() const
vk::PhysicalDevice GetPhysicalDevice() const
const std::shared_ptr< YUVConversionLibraryVK > & GetYUVConversionLibrary() const
bool SetDebugName(T handle, std::string_view label) const
Definition context_vk.h:151
std::shared_ptr< DeviceHolderVK > GetDeviceHolder() const
Definition context_vk.h:191
bool EnqueueCommandBuffer(std::shared_ptr< CommandBuffer > command_buffer) override
Enqueue command_buffer for submission by the end of the frame.
const vk::Device & GetDevice() const
bool FlushCommandBuffers() override
Flush all pending command buffers.
bool IsValid() const override
Determines if a context is valid. If the caller ever receives an invalid context, they must discard i...
const std::unique_ptr< DriverInfoVK > & GetDriverInfo() const
void DisposeThreadLocalCachedResources() override
std::shared_ptr< CommandBuffer > CreateCommandBuffer() const override
Create a new command buffer. Command buffers can be used to encode graphics, blit,...
virtual bool SubmitOnscreen(std::shared_ptr< CommandBuffer > cmd_buffer) override
Submit the command buffer that renders to the onscreen surface.
std::shared_ptr< SamplerLibrary > GetSamplerLibrary() const override
Returns the library of combined image samplers used in shaders.
static std::shared_ptr< ContextVK > Create(Settings settings)
std::shared_ptr< PipelineLibrary > GetPipelineLibrary() const override
Returns the library of pipelines used by render or compute commands.
const std::shared_ptr< QueueVK > & GetGraphicsQueue() const
const std::shared_ptr< const Capabilities > & GetCapabilities() const override
Get the capabilities of Impeller context. All optionally supported feature of the platform,...
RuntimeStageBackend GetRuntimeStageBackend() const override
Retrieve the runtime stage for this context type.
std::shared_ptr< CommandPoolRecyclerVK > GetCommandPoolRecycler() const
std::shared_ptr< CommandQueue > GetCommandQueue() const override
Return the graphics queue for submitting command buffers.
void InitializeCommonlyUsedShadersIfNeeded() const override
std::shared_ptr< FenceWaiterVK > GetFenceWaiter() const
bool GetShouldEnableSurfaceControlSwapchain() const
Whether the Android Surface control based swapchain should be enabled.
std::shared_ptr< GPUTracerVK > GetGPUTracer() const
BackendType GetBackendType() const override
Get the graphics backend of an Impeller context.
~ContextVK() override
std::string DescribeGpuModel() const override
const WorkaroundsVK & GetWorkarounds() const
const std::shared_ptr< fml::ConcurrentTaskRunner > GetConcurrentWorkerTaskRunner() const
static size_t ChooseThreadCountForWorkers(size_t hardware_concurrency)
std::shared_ptr< ShaderLibrary > GetShaderLibrary() const override
Returns the library of shaders used to specify the programmable stages of a pipeline.
vk::Instance GetInstance() const
void Shutdown() override
Force all pending asynchronous work to finish. This is achieved by deleting all owned concurrent mess...
std::shared_ptr< DescriptorPoolRecyclerVK > GetDescriptorPoolRecycler() const
std::shared_ptr< SurfaceContextVK > CreateSurfaceContext()
RenderPassBuilderVK & SetDepthStencilAttachment(PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action)
RenderPassBuilderVK & SetStencilAttachment(PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action)
RenderPassBuilderVK & SetColorAttachment(size_t index, PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action, vk::ImageLayout current_layout=vk::ImageLayout::eUndefined, bool is_swapchain=false)
vk::UniqueRenderPass Build(const vk::Device &device) const
a wrapper around the impeller [Allocator] instance that can be used to provide caching of allocated r...
virtual RenderTarget CreateOffscreenMSAA(const Context &context, ISize size, int mip_count, std::string_view label="Offscreen MSAA", RenderTarget::AttachmentConfigMSAA color_attachment_config=RenderTarget::kDefaultColorAttachmentConfigMSAA, std::optional< RenderTarget::AttachmentConfig > stencil_attachment_config=RenderTarget::kDefaultStencilAttachmentConfig, const std::shared_ptr< Texture > &existing_color_msaa_texture=nullptr, const std::shared_ptr< Texture > &existing_color_resolve_texture=nullptr, const std::shared_ptr< Texture > &existing_depth_stencil_texture=nullptr, std::optional< PixelFormat > target_pixel_format=std::nullopt)
bool IterateAllColorAttachments(const std::function< bool(size_t index, const ColorAttachment &attachment)> &iterator) const
const std::optional< DepthAttachment > & GetDepthAttachment() const
const std::optional< StencilAttachment > & GetStencilAttachment() const
static std::shared_ptr< ResourceManagerVK > Create()
Creates a shared resource manager (a dedicated thread).
int32_t value
VkPhysicalDevice physical_device
Definition main.cc:67
std::vector< const char * > enabled_device_extensions
Definition main.cc:68
VkDevice device
Definition main.cc:69
VkInstance instance
Definition main.cc:64
VkQueue queue
Definition main.cc:71
std::shared_ptr< ImpellerAllocator > allocator
it will be possible to load the file into Perfetto s trace viewer use test Running tests that layout and measure text will not yield consistent results across various platforms Enabling this option will make font resolution default to the Ahem test font on all disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap The size limit in megabytes for the Dart VM old gen heap space enable Enable the Impeller renderer on supported platforms Ignored if Impeller is not supported on the platform enable vulkan validation
bool HasValidationLayers()
Definition context_vk.cc:53
PixelFormat
The Pixel formats supported by Impeller. The naming convention denotes the usage of the component,...
Definition formats.h:99
static std::optional< vk::PhysicalDevice > PickPhysicalDevice(const CapabilitiesVK &caps, const vk::Instance &instance)
Definition context_vk.cc:57
static bool gHasValidationLayers
Definition context_vk.cc:51
static constexpr uint32_t kImpellerEngineVersion
static std::optional< QueueIndexVK > PickQueue(const vk::PhysicalDevice &device, vk::QueueFlagBits flags)
Definition context_vk.cc:91
WorkaroundsVK GetWorkaroundsFromDriverInfo(DriverInfoVK &driver_info)
static std::vector< vk::DeviceQueueCreateInfo > GetQueueCreateInfos(std::initializer_list< QueueIndexVK > queues)
Definition context_vk.cc:68
LoadAction load_action
Definition formats.h:663
std::shared_ptr< Texture > texture
Definition formats.h:661
StoreAction store_action
Definition formats.h:664
static QueuesVK FromEmbedderQueue(vk::Queue queue, uint32_t queue_family_index)
Definition queue_vk.cc:58
static QueuesVK FromQueueIndices(const vk::Device &device, QueueIndexVK graphics, QueueIndexVK compute, QueueIndexVK transfer)
Definition queue_vk.cc:67
std::shared_ptr< QueueVK > graphics_queue
Definition queue_vk.h:64
A non-exhaustive set of driver specific workarounds.
#define TRACE_EVENT0(category_group, name)
#define VALIDATION_LOG
Definition validation.h:91