#include "impeller/renderer/backend/vulkan/context_vk.h"

#include <sys/resource.h>

#include "flutter/fml/cpu_affinity.h"
#include "flutter/fml/trace_event.h"

VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE

namespace impeller {
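// Selects the first enumerated physical device whose required device
// extensions are all available according to the capabilities wrangler.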
static std::optional<vk::PhysicalDevice> PickPhysicalDevice(
    const CapabilitiesVK& caps,
    const vk::Instance& instance) {
  for (const auto& device : instance.enumeratePhysicalDevices().value) {
    if (caps.GetEnabledDeviceExtensions(device).has_value()) {
      return device;
    }
  }
  return std::nullopt;
}
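// Builds one vk::DeviceQueueCreateInfo per queue family referenced by the
// requested queues; the queue count for a family is its highest queue index
// plus one, and all queues share the same priority.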
static std::vector<vk::DeviceQueueCreateInfo> GetQueueCreateInfos(
    std::initializer_list<QueueIndexVK> queues) {
  std::map<size_t /* family */, size_t /* highest index */> family_index_map;
  for (const auto& queue : queues) {
    family_index_map[queue.family] = 0;
  }
  for (const auto& queue : queues) {
    family_index_map[queue.family] =
        std::max(family_index_map[queue.family], queue.index);
  }

  static float kQueuePriority = 1.0f;
  std::vector<vk::DeviceQueueCreateInfo> infos;
  for (const auto& item : family_index_map) {
    vk::DeviceQueueCreateInfo info;
    info.setQueueFamilyIndex(item.first);
    info.setQueueCount(item.second + 1);
    info.setQueuePriorities(kQueuePriority);
    infos.push_back(info);
  }
  return infos;
}
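// Returns the index of the first queue family that supports the requested
// queue flags, or std::nullopt if no family does.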
static std::optional<QueueIndexVK> PickQueue(const vk::PhysicalDevice& device,
                                             vk::QueueFlagBits flags) {
  const auto families = device.getQueueFamilyProperties();
  for (size_t i = 0u; i < families.size(); i++) {
    if (!(families[i].queueFlags & flags)) {
      continue;
    }
    return QueueIndexVK{.family = i, .index = 0};
  }
  return std::nullopt;
}
std::shared_ptr<ContextVK> ContextVK::Create(Settings settings) {
  auto context = std::shared_ptr<ContextVK>(new ContextVK());
  context->Setup(std::move(settings));
  if (!context->IsValid()) {
    return nullptr;
  }
  return context;
}
size_t ContextVK::ChooseThreadCountForWorkers(size_t hardware_concurrency) {
  // Use at most half of the available cores, clamped to between one and four
  // worker threads.
  return std::clamp(hardware_concurrency / 2ull, 1ull, 4ull);
}
thread_local uint64_t tls_context_count = 0;
uint64_t CalculateHash(void* ptr) {
  // The pointer itself is ignored; each context created on a thread just gets
  // the next value of a thread-local counter.
  return ++tls_context_count;
}
ContextVK::ContextVK() : hash_(CalculateHash(this)) {}
ContextVK::~ContextVK() {
  if (device_holder_ && device_holder_->device) {
    [[maybe_unused]] auto result = device_holder_->device->waitIdle();
  }
  CommandPoolRecyclerVK::DestroyThreadLocalPools(this);
}
void ContextVK::Setup(Settings settings) {
  if (!settings.proc_address_callback) {
    return;
  }
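  // Configure the concurrent worker pool: keep workers on non-performance
  // cores and, where supported, lower their scheduling priority.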
  raster_message_loop_ = fml::ConcurrentMessageLoop::Create(
      ChooseThreadCountForWorkers(std::thread::hardware_concurrency()));
  raster_message_loop_->PostTaskToAllWorkers([]() {
    fml::RequestAffinity(fml::CpuAffinity::kNotPerformance);
#ifdef FML_OS_ANDROID
    if (::setpriority(PRIO_PROCESS, gettid(), -5) != 0) {
      FML_LOG(ERROR) << "Failed to set Workers task runner priority";
    }
#endif  // FML_OS_ANDROID
  });
  auto& dispatcher = VULKAN_HPP_DEFAULT_DISPATCHER;
  dispatcher.init(settings.proc_address_callback);
  auto enable_validation = settings.enable_validation;

#if defined(FML_OS_ANDROID) && !defined(NDEBUG)
  // Validation is always enabled on Android debug builds.
  enable_validation = true;
#endif  // defined(FML_OS_ANDROID) && !defined(NDEBUG)

  auto caps = std::shared_ptr<CapabilitiesVK>(new CapabilitiesVK(
      enable_validation, settings.fatal_missing_validations));

  if (!caps->IsValid()) {
    return;
  }
  auto enabled_layers = caps->GetEnabledLayers();
  auto enabled_extensions = caps->GetEnabledInstanceExtensions();

  if (!enabled_layers.has_value() || !enabled_extensions.has_value()) {
    return;
  }
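  // When VK_KHR_portability_enumeration is available, the instance must opt in
  // to enumerating portability (non-conformant) implementations such as
  // MoltenVK.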
  vk::InstanceCreateFlags instance_flags = {};

  if (std::find(enabled_extensions.value().begin(),
                enabled_extensions.value().end(),
                "VK_KHR_portability_enumeration") !=
      enabled_extensions.value().end()) {
    instance_flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
  }
  std::vector<const char*> enabled_layers_c;
  std::vector<const char*> enabled_extensions_c;

  for (const auto& layer : enabled_layers.value()) {
    enabled_layers_c.push_back(layer.c_str());
  }

  for (const auto& ext : enabled_extensions.value()) {
    enabled_extensions_c.push_back(ext.c_str());
  }
  vk::ApplicationInfo application_info;
  application_info.setPEngineName("Impeller");
  application_info.setPApplicationName("Impeller");
  vk::StructureChain<vk::InstanceCreateInfo, vk::ValidationFeaturesEXT>
      instance_chain;

  if (!caps->AreValidationsEnabled()) {
    instance_chain.unlink<vk::ValidationFeaturesEXT>();
  }

  std::vector<vk::ValidationFeatureEnableEXT> enabled_validations = {
      vk::ValidationFeatureEnableEXT::eSynchronizationValidation,
  };

  auto& validation = instance_chain.get<vk::ValidationFeaturesEXT>();
  validation.setEnabledValidationFeatures(enabled_validations);

  auto& instance_info = instance_chain.get<vk::InstanceCreateInfo>();
  instance_info.setPEnabledLayerNames(enabled_layers_c);
  instance_info.setPEnabledExtensionNames(enabled_extensions_c);
  instance_info.setPApplicationInfo(&application_info);
  instance_info.setFlags(instance_flags);
  auto device_holder = std::make_shared<DeviceHolderImpl>();

  auto instance = vk::createInstanceUnique(instance_info);
  if (instance.result != vk::Result::eSuccess) {
    return;
  }
  device_holder->instance = std::move(instance.value);

  dispatcher.init(device_holder->instance.get());
  auto debug_report =
      std::make_unique<DebugReportVK>(*caps, device_holder->instance.get());
  if (!debug_report->IsValid()) {
    return;
  }
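  // A suitable physical device is selected via PickPhysicalDevice() and stored
  // in device_holder->physical_device before the queues are chosen below.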
  auto graphics_queue =
      PickQueue(device_holder->physical_device, vk::QueueFlagBits::eGraphics);
  auto transfer_queue =
      PickQueue(device_holder->physical_device, vk::QueueFlagBits::eTransfer);
  auto compute_queue =
      PickQueue(device_holder->physical_device, vk::QueueFlagBits::eCompute);

  if (!graphics_queue.has_value()) {
    return;
  }
  if (!transfer_queue.has_value()) {
    // Fall back to performing transfers on the graphics queue.
    transfer_queue = graphics_queue.value();
  }
  if (!compute_queue.has_value()) {
    return;
  }
  auto enabled_device_extensions =
      caps->GetEnabledDeviceExtensions(device_holder->physical_device);

  std::vector<const char*> enabled_device_extensions_c;
  for (const auto& ext : enabled_device_extensions.value()) {
    enabled_device_extensions_c.push_back(ext.c_str());
  }

  const auto queue_create_infos = GetQueueCreateInfos(
      {graphics_queue.value(), compute_queue.value(), transfer_queue.value()});
  const auto enabled_features =
      caps->GetEnabledDeviceFeatures(device_holder->physical_device);
  if (!enabled_features.has_value()) {
    return;
  }

  vk::DeviceCreateInfo device_info;
  device_info.setPNext(&enabled_features.value().get());
  device_info.setQueueCreateInfos(queue_create_infos);
  device_info.setPEnabledExtensionNames(enabled_device_extensions_c);

  auto device_result =
      device_holder->physical_device.createDeviceUnique(device_info);
  if (device_result.result != vk::Result::eSuccess) {
    return;
  }
  device_holder->device = std::move(device_result.value);

  if (!caps->SetPhysicalDevice(device_holder->physical_device)) {
    return;
  }
  auto allocator = std::shared_ptr<AllocatorVK>(
      new AllocatorVK(weak_from_this(),
                      application_info.apiVersion,
                      device_holder->physical_device,
                      device_holder,
                      device_holder->instance.get(),
                      *caps));
  if (!allocator->IsValid()) {
    return;
  }
  auto pipeline_library = std::shared_ptr<PipelineLibraryVK>(
      new PipelineLibraryVK(device_holder,
                            caps,
                            std::move(settings.cache_directory),
                            raster_message_loop_->GetTaskRunner()));
  if (!pipeline_library->IsValid()) {
    return;
  }
  auto sampler_library =
      std::shared_ptr<SamplerLibraryVK>(new SamplerLibraryVK(device_holder));

  auto shader_library = std::shared_ptr<ShaderLibraryVK>(
      new ShaderLibraryVK(device_holder, settings.shader_libraries_data));
  if (!shader_library->IsValid()) {
    return;
  }
  auto fence_waiter =
      std::shared_ptr<FenceWaiterVK>(new FenceWaiterVK(device_holder));

  auto resource_manager = ResourceManagerVK::Create();
  if (!resource_manager) {
    return;
  }
  auto command_pool_recycler =
      std::make_shared<CommandPoolRecyclerVK>(weak_from_this());
  if (!command_pool_recycler) {
    return;
  }

  auto descriptor_pool_recycler =
      std::make_shared<DescriptorPoolRecyclerVK>(weak_from_this());
  if (!descriptor_pool_recycler) {
    return;
  }
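  // Resolve the actual queue handles for the queue families and indexes picked
  // earlier.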
  QueuesVK queues(device_holder->device.get(),
                  graphics_queue.value(),
                  compute_queue.value(),
                  transfer_queue.value());
  if (!queues.IsValid()) {
    return;
  }

  VkPhysicalDeviceProperties physical_device_properties;
  dispatcher.vkGetPhysicalDeviceProperties(device_holder->physical_device,
                                           &physical_device_properties);
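  // Setup succeeded: commit the constructed objects to the context's members
  // and mark the context valid.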
  device_holder_ = std::move(device_holder);
  driver_info_ =
      std::make_unique<DriverInfoVK>(device_holder_->physical_device);
  debug_report_ = std::move(debug_report);
  allocator_ = std::move(allocator);
  shader_library_ = std::move(shader_library);
  sampler_library_ = std::move(sampler_library);
  pipeline_library_ = std::move(pipeline_library);
  yuv_conversion_library_ = std::shared_ptr<YUVConversionLibraryVK>(
      new YUVConversionLibraryVK(device_holder_));
  queues_ = std::move(queues);
  device_capabilities_ = std::move(caps);
  fence_waiter_ = std::move(fence_waiter);
  resource_manager_ = std::move(resource_manager);
  command_pool_recycler_ = std::move(command_pool_recycler);
  descriptor_pool_recycler_ = std::move(descriptor_pool_recycler);
  device_name_ = std::string(physical_device_properties.deviceName);
  command_queue_vk_ = std::make_shared<CommandQueueVK>(weak_from_this());
  is_valid_ = true;

  // The GPU tracer is created last since it uses the now fully configured
  // context.
  gpu_tracer_ = std::make_shared<GPUTracerVK>(weak_from_this(),
                                              settings.enable_gpu_tracing);
  gpu_tracer_->InitializeQueryPool(*this);
}
std::shared_ptr<ShaderLibrary> ContextVK::GetShaderLibrary() const {
  return shader_library_;
}

std::shared_ptr<SamplerLibrary> ContextVK::GetSamplerLibrary() const {
  return sampler_library_;
}

std::shared_ptr<PipelineLibrary> ContextVK::GetPipelineLibrary() const {
  return pipeline_library_;
}

std::shared_ptr<CommandBuffer> ContextVK::CreateCommandBuffer() const {
  return std::shared_ptr<CommandBufferVK>(new CommandBufferVK(
      shared_from_this(), CreateGraphicsCommandEncoderFactory()));
}

vk::Instance ContextVK::GetInstance() const {
  return *device_holder_->instance;
}

const vk::Device& ContextVK::GetDevice() const {
  return device_holder_->device.get();
}
const std::shared_ptr<fml::ConcurrentTaskRunner>
ContextVK::GetConcurrentWorkerTaskRunner() const {
  return raster_message_loop_->GetTaskRunner();
}
void ContextVK::Shutdown() {
  // Force all pending asynchronous work to finish before tearing down the
  // concurrent worker loop.
  fence_waiter_.reset();
  resource_manager_.reset();

  raster_message_loop_->Terminate();
}
std::shared_ptr<SurfaceContextVK> ContextVK::CreateSurfaceContext() {
  return std::make_shared<SurfaceContextVK>(shared_from_this());
}

const std::shared_ptr<const Capabilities>& ContextVK::GetCapabilities() const {
  return device_capabilities_;
}

vk::PhysicalDevice ContextVK::GetPhysicalDevice() const {
  return device_holder_->physical_device;
}

std::shared_ptr<FenceWaiterVK> ContextVK::GetFenceWaiter() const {
  return fence_waiter_;
}

std::shared_ptr<ResourceManagerVK> ContextVK::GetResourceManager() const {
  return resource_manager_;
}

std::shared_ptr<CommandPoolRecyclerVK> ContextVK::GetCommandPoolRecycler()
    const {
  return command_pool_recycler_;
}
std::unique_ptr<CommandEncoderFactoryVK>
ContextVK::CreateGraphicsCommandEncoderFactory() const {
  return std::make_unique<CommandEncoderFactoryVK>(weak_from_this());
}

std::shared_ptr<DescriptorPoolRecyclerVK> ContextVK::GetDescriptorPoolRecycler()
    const {
  return descriptor_pool_recycler_;
}

std::shared_ptr<CommandQueue> ContextVK::GetCommandQueue() const {
  return command_queue_vk_;
}
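  // The render pass description is derived from the render target: each color,
  // depth, and stencil attachment contributes its pixel format, sample count,
  // and load/store actions to the builder.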
  for (const auto& [index, color] : render_target.GetColorAttachments()) {
    builder.SetColorAttachment(index,
                               color.texture->GetTextureDescriptor().format,
                               color.texture->GetTextureDescriptor().sample_count,
                               color.load_action,
                               color.store_action);
  }

  if (const auto& depth = render_target.GetDepthAttachment();
      depth.has_value()) {
    builder.SetDepthStencilAttachment(
        depth->texture->GetTextureDescriptor().format,
        depth->texture->GetTextureDescriptor().sample_count,
        depth->load_action,
        depth->store_action);
  } else if (const auto& stencil = render_target.GetStencilAttachment();
             stencil.has_value()) {
    builder.SetStencilAttachment(
        stencil->texture->GetTextureDescriptor().format,
        stencil->texture->GetTextureDescriptor().sample_count,
        stencil->load_action,
        stencil->store_action);
  }
const std::shared_ptr<YUVConversionLibraryVK>&
ContextVK::GetYUVConversionLibrary() const {
  return yuv_conversion_library_;
}

}  // namespace impeller