context_vk.cc
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "impeller/renderer/backend/vulkan/context_vk.h"

#ifdef FML_OS_ANDROID
#include <pthread.h>
#include <sys/resource.h>
#include <sys/time.h>
#endif  // FML_OS_ANDROID

#include <map>
#include <memory>
#include <optional>
#include <string>
#include <vector>

#include "flutter/fml/cpu_affinity.h"
#include "flutter/fml/trace_event.h"
// (Remaining Impeller Vulkan backend includes are omitted from this listing.)

VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE

namespace impeller {

// TODO(csg): Fix this after caps are reworked.
static bool gHasValidationLayers = false;

bool HasValidationLayers() {
  return gHasValidationLayers;
}

static std::optional<vk::PhysicalDevice> PickPhysicalDevice(
    const CapabilitiesVK& caps,
    const vk::Instance& instance) {
  for (const auto& device : instance.enumeratePhysicalDevices().value) {
    if (caps.GetEnabledDeviceFeatures(device).has_value()) {
      return device;
    }
  }
  return std::nullopt;
}

static std::vector<vk::DeviceQueueCreateInfo> GetQueueCreateInfos(
    std::initializer_list<QueueIndexVK> queues) {
  std::map<size_t /* family */, size_t /* index */> family_index_map;
  for (const auto& queue : queues) {
    family_index_map[queue.family] = 0;
  }
  for (const auto& queue : queues) {
    auto value = family_index_map[queue.family];
    family_index_map[queue.family] = std::max(value, queue.index);
  }

  static float kQueuePriority = 1.0f;
  std::vector<vk::DeviceQueueCreateInfo> infos;
  for (const auto& item : family_index_map) {
    vk::DeviceQueueCreateInfo info;
    info.setQueueFamilyIndex(item.first);
    info.setQueueCount(item.second + 1);
    info.setQueuePriorities(kQueuePriority);
    infos.push_back(info);
  }
  return infos;
}
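
// A worked example of the mapping above, with hypothetical queue picks (the
// PickQueue helper below currently always returns index 0, so distinct indices
// would only appear if dedicated-queue selection were added): if the graphics
// and compute queues resolve to {family = 0, index = 0} and
// {family = 0, index = 1} while transfer resolves to {family = 1, index = 0},
// family_index_map ends up as {0: 1, 1: 0}, and the function requests two
// queues from family 0 and one queue from family 1.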

static std::optional<QueueIndexVK> PickQueue(const vk::PhysicalDevice& device,
                                             vk::QueueFlagBits flags) {
  // This can be modified to ensure that dedicated queues are returned for each
  // queue type depending on support.
  const auto families = device.getQueueFamilyProperties();
  for (size_t i = 0u; i < families.size(); i++) {
    if (!(families[i].queueFlags & flags)) {
      continue;
    }
    return QueueIndexVK{.family = i, .index = 0};
  }
  return std::nullopt;
}

std::shared_ptr<ContextVK> ContextVK::Create(Settings settings) {
  auto context = std::shared_ptr<ContextVK>(new ContextVK());
  context->Setup(std::move(settings));
  if (!context->IsValid()) {
    return nullptr;
  }
  return context;
}
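
// A minimal usage sketch (not part of this file): an embedder populates
// ContextVK::Settings and calls Create(). The fields shown are the ones
// consumed by Setup() below; how the proc address is obtained is up to the
// embedder.
//
//   ContextVK::Settings settings;
//   settings.proc_address_callback = vkGetInstanceProcAddr;  // from the loader
//   settings.enable_validation = false;
//   settings.enable_gpu_tracing = false;
//   auto context = ContextVK::Create(std::move(settings));
//   if (!context) {
//     // Setup() failed; details are emitted via VALIDATION_LOG.
//   }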

// static
size_t ContextVK::ChooseThreadCountForWorkers(size_t hardware_concurrency) {
  // Never create more than 4 worker threads. Attempt to use up to
  // half of the available concurrency.
  return std::clamp(hardware_concurrency / 2ull, /*lo=*/1ull, /*hi=*/4ull);
}
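
// For example: a hardware_concurrency of 2 yields 1 worker, 8 yields 4, and
// 16 still yields 4 (the clamp caps the count). A reported concurrency of 0
// also yields 1, since the result is clamped from below.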

namespace {
thread_local uint64_t tls_context_count = 0;
uint64_t CalculateHash(void* ptr) {
  // You could make a context once per nanosecond for 584 years on one thread
  // before this overflows.
  return ++tls_context_count;
}
}  // namespace
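
// Checking the arithmetic in the CalculateHash comment above: the counter is
// 64 bits, and 2^64 nanoseconds is about 1.8e19 ns ≈ 1.8e10 seconds, which is
// roughly 584 years, so per-thread overflow is not a practical concern.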

ContextVK::ContextVK() : hash_(CalculateHash(this)) {}

ContextVK::~ContextVK() {
  if (device_holder_ && device_holder_->device) {
    [[maybe_unused]] auto result = device_holder_->device->waitIdle();
  }
  CommandPoolRecyclerVK::DestroyThreadLocalPools(this);
}

Context::BackendType ContextVK::GetBackendType() const {
  return Context::BackendType::kVulkan;
}

void ContextVK::Setup(Settings settings) {
  TRACE_EVENT0("impeller", "ContextVK::Setup");

  if (!settings.proc_address_callback) {
    return;
  }

  raster_message_loop_ = fml::ConcurrentMessageLoop::Create(
      ChooseThreadCountForWorkers(std::thread::hardware_concurrency()));
  raster_message_loop_->PostTaskToAllWorkers([]() {
    // Currently we only use the worker task pool for small parts of a frame
    // workload; if this changes, this setting may need to be adjusted.
    fml::RequestAffinity(fml::CpuAffinity::kNotPerformance);
#ifdef FML_OS_ANDROID
    if (::setpriority(PRIO_PROCESS, gettid(), -5) != 0) {
      FML_LOG(ERROR) << "Failed to set Workers task runner priority";
    }
#endif  // FML_OS_ANDROID
  });

  auto& dispatcher = VULKAN_HPP_DEFAULT_DISPATCHER;
  dispatcher.init(settings.proc_address_callback);

  // Enable Vulkan validation if either:
  // 1. The user has explicitly enabled it.
  // 2. We are running a debug build on Android.
  //    (It's possible 2 is overly conservative and could be simplified.)
  auto enable_validation = settings.enable_validation;

#if defined(FML_OS_ANDROID) && !defined(NDEBUG)
  enable_validation = true;
#endif

  auto caps = std::shared_ptr<CapabilitiesVK>(new CapabilitiesVK(
      enable_validation, settings.fatal_missing_validations));

  if (!caps->IsValid()) {
    VALIDATION_LOG << "Could not determine device capabilities.";
    return;
  }

  gHasValidationLayers = caps->AreValidationsEnabled();

  auto enabled_layers = caps->GetEnabledLayers();
  auto enabled_extensions = caps->GetEnabledInstanceExtensions();

  if (!enabled_layers.has_value() || !enabled_extensions.has_value()) {
    VALIDATION_LOG << "Device has insufficient capabilities.";
    return;
  }

  vk::InstanceCreateFlags instance_flags = {};

  if (std::find(enabled_extensions.value().begin(),
                enabled_extensions.value().end(),
                "VK_KHR_portability_enumeration") !=
      enabled_extensions.value().end()) {
    instance_flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
  }

  std::vector<const char*> enabled_layers_c;
  std::vector<const char*> enabled_extensions_c;

  for (const auto& layer : enabled_layers.value()) {
    enabled_layers_c.push_back(layer.c_str());
  }

  for (const auto& ext : enabled_extensions.value()) {
    enabled_extensions_c.push_back(ext.c_str());
  }

  vk::ApplicationInfo application_info;
  application_info.setApplicationVersion(VK_API_VERSION_1_0);
  application_info.setApiVersion(VK_API_VERSION_1_1);
  application_info.setEngineVersion(VK_API_VERSION_1_0);
  application_info.setPEngineName("Impeller");
  application_info.setPApplicationName("Impeller");

  vk::StructureChain<vk::InstanceCreateInfo, vk::ValidationFeaturesEXT>
      instance_chain;

  if (!caps->AreValidationsEnabled()) {
    instance_chain.unlink<vk::ValidationFeaturesEXT>();
  }

  std::vector<vk::ValidationFeatureEnableEXT> enabled_validations = {
      vk::ValidationFeatureEnableEXT::eSynchronizationValidation,
  };

  // Note: get<>() returns a reference into the structure chain; taking it by
  // value would configure a copy and leave the chained structs untouched.
  auto& validation = instance_chain.get<vk::ValidationFeaturesEXT>();
  validation.setEnabledValidationFeatures(enabled_validations);

  auto& instance_info = instance_chain.get<vk::InstanceCreateInfo>();
  instance_info.setPEnabledLayerNames(enabled_layers_c);
  instance_info.setPEnabledExtensionNames(enabled_extensions_c);
  instance_info.setPApplicationInfo(&application_info);
  instance_info.setFlags(instance_flags);

  auto device_holder = std::make_shared<DeviceHolderImpl>();
  {
    auto instance = vk::createInstanceUnique(instance_info);
    if (instance.result != vk::Result::eSuccess) {
      VALIDATION_LOG << "Could not create Vulkan instance: "
                     << vk::to_string(instance.result);
      return;
    }
    device_holder->instance = std::move(instance.value);
  }
  dispatcher.init(device_holder->instance.get());

  //----------------------------------------------------------------------------
  /// Setup the debug report.
  ///
  /// Do this as early as possible since we may need the debug report for
  /// initialization issues.
  ///
  auto debug_report =
      std::make_unique<DebugReportVK>(*caps, device_holder->instance.get());

  if (!debug_report->IsValid()) {
    VALIDATION_LOG << "Could not set up debug report.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Pick the physical device.
  ///
  {
    auto physical_device =
        PickPhysicalDevice(*caps, device_holder->instance.get());
    if (!physical_device.has_value()) {
      VALIDATION_LOG << "No valid Vulkan device found.";
      return;
    }
    device_holder->physical_device = physical_device.value();
  }

  //----------------------------------------------------------------------------
  /// Pick device queues.
  ///
  auto graphics_queue =
      PickQueue(device_holder->physical_device, vk::QueueFlagBits::eGraphics);
  auto transfer_queue =
      PickQueue(device_holder->physical_device, vk::QueueFlagBits::eTransfer);
  auto compute_queue =
      PickQueue(device_holder->physical_device, vk::QueueFlagBits::eCompute);

  if (!graphics_queue.has_value()) {
    VALIDATION_LOG << "Could not pick graphics queue.";
    return;
  }
  if (!transfer_queue.has_value()) {
    transfer_queue = graphics_queue.value();
  }
  if (!compute_queue.has_value()) {
    VALIDATION_LOG << "Could not pick compute queue.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Create the logical device.
  ///
  const auto enabled_device_extensions =
      caps->GetEnabledDeviceExtensions(device_holder->physical_device);
  if (!enabled_device_extensions.has_value()) {
    // This shouldn't happen since we already did device selection. But it
    // doesn't hurt to check again.
    return;
  }

  std::vector<const char*> enabled_device_extensions_c;
  for (const auto& ext : enabled_device_extensions.value()) {
    enabled_device_extensions_c.push_back(ext.c_str());
  }

  const auto queue_create_infos = GetQueueCreateInfos(
      {graphics_queue.value(), compute_queue.value(), transfer_queue.value()});

  const auto enabled_features =
      caps->GetEnabledDeviceFeatures(device_holder->physical_device);
  if (!enabled_features.has_value()) {
    // This shouldn't happen since the device can't be picked if this was not
    // true. But it doesn't hurt to check.
    return;
  }

  vk::DeviceCreateInfo device_info;

  device_info.setPNext(&enabled_features.value().get());
  device_info.setQueueCreateInfos(queue_create_infos);
  device_info.setPEnabledExtensionNames(enabled_device_extensions_c);
  // Device layers are deprecated and ignored.

  {
    auto device_result =
        device_holder->physical_device.createDeviceUnique(device_info);
    if (device_result.result != vk::Result::eSuccess) {
      VALIDATION_LOG << "Could not create logical device.";
      return;
    }
    device_holder->device = std::move(device_result.value);
  }

  if (!caps->SetPhysicalDevice(device_holder->physical_device)) {
    VALIDATION_LOG << "Capabilities could not be updated.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Create the allocator.
  ///
  auto allocator = std::shared_ptr<AllocatorVK>(new AllocatorVK(
      weak_from_this(),                //
      application_info.apiVersion,     //
      device_holder->physical_device,  //
      device_holder,                   //
      device_holder->instance.get(),   //
      *caps                            //
      ));

  if (!allocator->IsValid()) {
    VALIDATION_LOG << "Could not create memory allocator.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Setup the pipeline library.
  ///
  auto pipeline_library = std::shared_ptr<PipelineLibraryVK>(
      new PipelineLibraryVK(device_holder,                         //
                            caps,                                  //
                            std::move(settings.cache_directory),   //
                            raster_message_loop_->GetTaskRunner()  //
                            ));

  if (!pipeline_library->IsValid()) {
    VALIDATION_LOG << "Could not create pipeline library.";
    return;
  }

  auto sampler_library =
      std::shared_ptr<SamplerLibraryVK>(new SamplerLibraryVK(device_holder));

  auto shader_library = std::shared_ptr<ShaderLibraryVK>(
      new ShaderLibraryVK(device_holder,                   //
                          settings.shader_libraries_data)  //
  );

  if (!shader_library->IsValid()) {
    VALIDATION_LOG << "Could not create shader library.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Create the fence waiter.
  ///
  auto fence_waiter =
      std::shared_ptr<FenceWaiterVK>(new FenceWaiterVK(device_holder));

  //----------------------------------------------------------------------------
  /// Create the resource manager and command pool recycler.
  ///
  auto resource_manager = ResourceManagerVK::Create();
  if (!resource_manager) {
    VALIDATION_LOG << "Could not create resource manager.";
    return;
  }

  auto command_pool_recycler =
      std::make_shared<CommandPoolRecyclerVK>(weak_from_this());
  if (!command_pool_recycler) {
    VALIDATION_LOG << "Could not create command pool recycler.";
    return;
  }

  auto descriptor_pool_recycler =
      std::make_shared<DescriptorPoolRecyclerVK>(weak_from_this());
  if (!descriptor_pool_recycler) {
    VALIDATION_LOG << "Could not create descriptor pool recycler.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Fetch the queues.
  ///
  QueuesVK queues(device_holder->device.get(),  //
                  graphics_queue.value(),       //
                  compute_queue.value(),        //
                  transfer_queue.value()        //
  );
  if (!queues.IsValid()) {
    VALIDATION_LOG << "Could not fetch device queues.";
    return;
  }

  VkPhysicalDeviceProperties physical_device_properties;
  dispatcher.vkGetPhysicalDeviceProperties(device_holder->physical_device,
                                           &physical_device_properties);

  //----------------------------------------------------------------------------
  /// All done!
  ///
  device_holder_ = std::move(device_holder);
  driver_info_ =
      std::make_unique<DriverInfoVK>(device_holder_->physical_device);
  debug_report_ = std::move(debug_report);
  allocator_ = std::move(allocator);
  shader_library_ = std::move(shader_library);
  sampler_library_ = std::move(sampler_library);
  pipeline_library_ = std::move(pipeline_library);
  yuv_conversion_library_ = std::shared_ptr<YUVConversionLibraryVK>(
      new YUVConversionLibraryVK(device_holder_));
  queues_ = std::move(queues);
  device_capabilities_ = std::move(caps);
  fence_waiter_ = std::move(fence_waiter);
  resource_manager_ = std::move(resource_manager);
  command_pool_recycler_ = std::move(command_pool_recycler);
  descriptor_pool_recycler_ = std::move(descriptor_pool_recycler);
  device_name_ = std::string(physical_device_properties.deviceName);
  command_queue_vk_ = std::make_shared<CommandQueueVK>(weak_from_this());
  is_valid_ = true;

  // Create the GPU Tracer later because it depends on state from
  // the ContextVK.
  gpu_tracer_ = std::make_shared<GPUTracerVK>(weak_from_this(),
                                              settings.enable_gpu_tracing);
  gpu_tracer_->InitializeQueryPool(*this);

  //----------------------------------------------------------------------------
  /// Label all the relevant objects. This happens after setup so that the
  /// debug messengers have had a chance to be set up.
  ///
  SetDebugName(GetDevice(), device_holder_->device.get(), "ImpellerDevice");
}

void ContextVK::SetOffscreenFormat(PixelFormat pixel_format) {
  CapabilitiesVK::Cast(*device_capabilities_).SetOffscreenFormat(pixel_format);
}

// |Context|
std::string ContextVK::DescribeGpuModel() const {
  return device_name_;
}

bool ContextVK::IsValid() const {
  return is_valid_;
}

std::shared_ptr<Allocator> ContextVK::GetResourceAllocator() const {
  return allocator_;
}

std::shared_ptr<ShaderLibrary> ContextVK::GetShaderLibrary() const {
  return shader_library_;
}

std::shared_ptr<SamplerLibrary> ContextVK::GetSamplerLibrary() const {
  return sampler_library_;
}

std::shared_ptr<PipelineLibrary> ContextVK::GetPipelineLibrary() const {
  return pipeline_library_;
}

std::shared_ptr<CommandBuffer> ContextVK::CreateCommandBuffer() const {
  return std::shared_ptr<CommandBufferVK>(
      new CommandBufferVK(shared_from_this(),                     //
                          CreateGraphicsCommandEncoderFactory())  //
  );
}

vk::Instance ContextVK::GetInstance() const {
  return *device_holder_->instance;
}

const vk::Device& ContextVK::GetDevice() const {
  return device_holder_->device.get();
}

const std::shared_ptr<fml::ConcurrentTaskRunner>
ContextVK::GetConcurrentWorkerTaskRunner() const {
  return raster_message_loop_->GetTaskRunner();
}

void ContextVK::Shutdown() {
  // There are multiple objects, for example |CommandPoolVK|, that in their
  // destructors make a strong reference to |ContextVK|. Resetting these shared
  // pointers ensures that cleanup happens in a correct order.
  //
  // tl;dr: Without it, we get thread::join failures on shutdown.
  fence_waiter_.reset();
  resource_manager_.reset();

  raster_message_loop_->Terminate();
}

std::shared_ptr<SurfaceContextVK> ContextVK::CreateSurfaceContext() {
  return std::make_shared<SurfaceContextVK>(shared_from_this());
}

const std::shared_ptr<const Capabilities>& ContextVK::GetCapabilities() const {
  return device_capabilities_;
}

const std::shared_ptr<QueueVK>& ContextVK::GetGraphicsQueue() const {
  return queues_.graphics_queue;
}

vk::PhysicalDevice ContextVK::GetPhysicalDevice() const {
  return device_holder_->physical_device;
}

std::shared_ptr<FenceWaiterVK> ContextVK::GetFenceWaiter() const {
  return fence_waiter_;
}

std::shared_ptr<ResourceManagerVK> ContextVK::GetResourceManager() const {
  return resource_manager_;
}

std::shared_ptr<CommandPoolRecyclerVK> ContextVK::GetCommandPoolRecycler()
    const {
  return command_pool_recycler_;
}

std::unique_ptr<CommandEncoderFactoryVK>
ContextVK::CreateGraphicsCommandEncoderFactory() const {
  return std::make_unique<CommandEncoderFactoryVK>(weak_from_this());
}

std::shared_ptr<GPUTracerVK> ContextVK::GetGPUTracer() const {
  return gpu_tracer_;
}

std::shared_ptr<DescriptorPoolRecyclerVK> ContextVK::GetDescriptorPoolRecycler()
    const {
  return descriptor_pool_recycler_;
}

std::shared_ptr<CommandQueue> ContextVK::GetCommandQueue() const {
  return command_queue_vk_;
}

// Creating a render pass is observed to take an additional 6ms on a Pixel 7
// device as the driver will lazily bootstrap and compile shaders to do so.
// The render pass does not need to be begun or executed.
void ContextVK::InitializeCommonlyUsedShadersIfNeeded() const {
  RenderTargetAllocator rt_allocator(GetResourceAllocator());
  RenderTarget render_target =
      rt_allocator.CreateOffscreenMSAA(*this, {1, 1}, 1);

  RenderPassBuilderVK builder;
  for (const auto& [bind_point, color] : render_target.GetColorAttachments()) {
    builder.SetColorAttachment(
        bind_point,                                          //
        color.texture->GetTextureDescriptor().format,        //
        color.texture->GetTextureDescriptor().sample_count,  //
        color.load_action,                                   //
        color.store_action                                   //
    );
  }

  if (auto depth = render_target.GetDepthAttachment(); depth.has_value()) {
    builder.SetDepthStencilAttachment(
        depth->texture->GetTextureDescriptor().format,        //
        depth->texture->GetTextureDescriptor().sample_count,  //
        depth->load_action,                                   //
        depth->store_action                                   //
    );
  } else if (auto stencil = render_target.GetStencilAttachment();
             stencil.has_value()) {
    builder.SetStencilAttachment(
        stencil->texture->GetTextureDescriptor().format,        //
        stencil->texture->GetTextureDescriptor().sample_count,  //
        stencil->load_action,                                   //
        stencil->store_action                                   //
    );
  }

  auto pass = builder.Build(GetDevice());
}
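
// Usage sketch (hypothetical call site, not part of this file): an embedder
// can invoke this once the context exists to pre-warm the driver's render
// pass and shader bootstrap work off the critical path of the first frame:
//
//   context->InitializeCommonlyUsedShadersIfNeeded();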

const std::shared_ptr<YUVConversionLibraryVK>&
ContextVK::GetYUVConversionLibrary() const {
  return yuv_conversion_library_;
}

const std::unique_ptr<DriverInfoVK>& ContextVK::GetDriverInfo() const {
  return driver_info_;
}

}  // namespace impeller