Flutter Engine Uber Docs
Docs for the entire Flutter Engine repo.
 
context_mtl.mm
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "impeller/renderer/backend/metal/context_mtl.h"

#include <Metal/Metal.h>

#include <memory>

#include "flutter/fml/file.h"
#include "flutter/fml/logging.h"
#include "flutter/fml/paths.h"

namespace impeller {

static bool DeviceSupportsFramebufferFetch(id<MTLDevice> device) {
#if FML_OS_IOS_SIMULATOR
  // The iOS simulator lies about supporting framebuffer fetch.
  return false;
#else  // FML_OS_IOS_SIMULATOR
  // According to
  // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf, Apple2
  // corresponds to iOS GPU family 2, which supports A8 devices.
  return [device supportsFamily:MTLGPUFamilyApple2];
#endif  // FML_OS_IOS_SIMULATOR
}

static bool DeviceSupportsComputeSubgroups(id<MTLDevice> device) {
  // Refer to the "SIMD-scoped reduction operations" feature in the table
  // below: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
  return [device supportsFamily:MTLGPUFamilyApple7] ||
         [device supportsFamily:MTLGPUFamilyMac2];
}

// See "Extended Range and wide color pixel formats" in the Metal Feature Set
// Tables: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
// Wide gamut requires the Apple3+ GPU family (supports 10-bit and F16
// formats). This includes all iOS devices with an A9+ chip and Apple Silicon
// Macs (M1+). Intel Macs (Mac2 family) do not support wide gamut.
static bool DeviceSupportsExtendedRangeFormats(id<MTLDevice> device) {
  return [device supportsFamily:MTLGPUFamilyApple3];
}

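// Builds the Metal-backed Impeller context: validates the device, registers
// a GPU-availability sync switch observer, and sets up the shader, pipeline,
// and sampler libraries plus the resource allocator. |is_valid_| is only set
// once every step has succeeded, so any early return leaves the context
// invalid.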
ContextMTL::ContextMTL(
    const Flags& flags,
    id<MTLDevice> device,
    id<MTLCommandQueue> command_queue,
    NSArray<id<MTLLibrary>>* shader_libraries,
    std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch,
    std::optional<PixelFormat> pixel_format_override)
    : Context(flags),
      device_(device),
      command_queue_(command_queue),
      is_gpu_disabled_sync_switch_(std::move(is_gpu_disabled_sync_switch)) {
  // Validate device.
  if (!device_) {
    VALIDATION_LOG << "Could not set up valid Metal device.";
    return;
  }

  sync_switch_observer_.reset(new SyncSwitchObserver(*this));
  is_gpu_disabled_sync_switch_->AddObserver(sync_switch_observer_.get());

  // Setup the shader library.
  {
    if (shader_libraries == nil) {
      VALIDATION_LOG << "Shader libraries were null.";
      return;
    }

    // std::make_shared disallowed because of private friend ctor.
    auto library = std::shared_ptr<ShaderLibraryMTL>(
        new ShaderLibraryMTL(shader_libraries));
    if (!library->IsValid()) {
      VALIDATION_LOG << "Could not create valid Metal shader library.";
      return;
    }
    shader_library_ = std::move(library);
  }

  // Setup the pipeline library.
  {
    pipeline_library_ =
        std::shared_ptr<PipelineLibraryMTL>(new PipelineLibraryMTL(device_));
  }

  // Setup the sampler library.
  {
    sampler_library_ =
        std::shared_ptr<SamplerLibraryMTL>(new SamplerLibraryMTL(device_));
  }

  // Setup the resource allocator.
  {
    resource_allocator_ = std::shared_ptr<AllocatorMTL>(
        new AllocatorMTL(device_, "Impeller Permanents Allocator"));
    if (!resource_allocator_) {
      VALIDATION_LOG << "Could not set up the resource allocator.";
      return;
    }
  }

  device_capabilities_ =
      InferMetalCapabilities(device_, pixel_format_override.has_value()
                                          ? pixel_format_override.value()
                                          : PixelFormat::kB8G8R8A8UNormInt);
  command_queue_ip_ = std::make_shared<CommandQueue>();
#ifdef IMPELLER_DEBUG
  gpu_tracer_ = std::make_shared<GPUTracerMTL>();
  capture_manager_ = std::make_shared<ImpellerMetalCaptureManager>(device_);
#endif  // IMPELLER_DEBUG
  is_valid_ = true;
}

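// Loads precompiled .metallib shader archives from the given file paths.
// Returns nil if any path does not point at a file or fails to load, so a
// partial set of libraries is never returned.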
static NSArray<id<MTLLibrary>>* MTLShaderLibraryFromFilePaths(
    id<MTLDevice> device,
    const std::vector<std::string>& libraries_paths) {
  NSMutableArray<id<MTLLibrary>>* found_libraries = [NSMutableArray array];
  for (const auto& library_path : libraries_paths) {
    if (!fml::IsFile(library_path)) {
      VALIDATION_LOG << "Shader library does not exist at path '"
                     << library_path << "'";
      return nil;
    }
    NSError* shader_library_error = nil;
    auto library = [device newLibraryWithFile:@(library_path.c_str())
                                        error:&shader_library_error];
    if (!library) {
      FML_LOG(ERROR) << "Could not create shader library: "
                     << shader_library_error.localizedDescription.UTF8String;
      return nil;
    }
    [found_libraries addObject:library];
  }
  return found_libraries;
}

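// Wraps each in-memory shader mapping in a dispatch_data_t so Metal can read
// it without copying. The __block shared_ptr copy keeps the fml::Mapping
// alive until the dispatch_data destructor block runs, i.e. until Metal is
// done with the bytes.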
static NSArray<id<MTLLibrary>>* MTLShaderLibraryFromFileData(
    id<MTLDevice> device,
    const std::vector<std::shared_ptr<fml::Mapping>>& libraries_data,
    const std::string& label) {
  NSMutableArray<id<MTLLibrary>>* found_libraries = [NSMutableArray array];
  for (const auto& library_data : libraries_data) {
    if (library_data == nullptr) {
      FML_LOG(ERROR) << "Shader library data was null.";
      return nil;
    }

    __block auto data = library_data;

    auto dispatch_data =
        ::dispatch_data_create(library_data->GetMapping(),  // buffer
                               library_data->GetSize(),     // size
                               dispatch_get_main_queue(),   // queue
                               ^() {
                                 // We just need a reference.
                                 data.reset();
                               }  // destructor
        );
    if (!dispatch_data) {
      FML_LOG(ERROR) << "Could not wrap shader data in dispatch data.";
      return nil;
    }

    NSError* shader_library_error = nil;
    auto library = [device newLibraryWithData:dispatch_data
                                        error:&shader_library_error];
    if (!library) {
      FML_LOG(ERROR) << "Could not create shader library: "
                     << shader_library_error.localizedDescription.UTF8String;
      return nil;
    }
    if (!label.empty()) {
      library.label = @(label.c_str());
    }
    [found_libraries addObject:library];
  }
  return found_libraries;
}

static id<MTLDevice> CreateMetalDevice() {
  return ::MTLCreateSystemDefaultDevice();
}

static id<MTLCommandQueue> CreateMetalCommandQueue(id<MTLDevice> device) {
  auto command_queue = device.newCommandQueue;
  if (!command_queue) {
    VALIDATION_LOG << "Could not set up the command queue.";
    return nullptr;
  }
  command_queue.label = @"Impeller Command Queue";
  return command_queue;
}

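// Factory used by embedders that only have shader library file paths on
// disk. An illustrative call site (not from this file; the path and default
// flags are placeholders) might look like:
//
//   auto context = ContextMTL::Create(Flags{}, {"path/to/shaders.metallib"},
//                                     is_gpu_disabled_sync_switch);
//   if (!context) { /* handle setup failure */ }
//
// Returns nullptr if the command queue or any shader library cannot be set
// up.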
std::shared_ptr<ContextMTL> ContextMTL::Create(
    const Flags& flags,
    const std::vector<std::string>& shader_library_paths,
    std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch) {
  auto device = CreateMetalDevice();
  auto command_queue = CreateMetalCommandQueue(device);
  if (!command_queue) {
    return nullptr;
  }
  auto context = std::shared_ptr<ContextMTL>(new ContextMTL(
      flags, device, command_queue,
      MTLShaderLibraryFromFilePaths(device, shader_library_paths),
      std::move(is_gpu_disabled_sync_switch)));
  if (!context->IsValid()) {
    FML_LOG(ERROR) << "Could not create Metal context.";
    return nullptr;
  }
  return context;
}

std::shared_ptr<ContextMTL> ContextMTL::Create(
    const Flags& flags,
    const std::vector<std::shared_ptr<fml::Mapping>>& shader_libraries_data,
    std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch,
    const std::string& library_label,
    std::optional<PixelFormat> pixel_format_override) {
  auto device = CreateMetalDevice();
  auto command_queue = CreateMetalCommandQueue(device);
  if (!command_queue) {
    return nullptr;
  }
  auto context = std::shared_ptr<ContextMTL>(new ContextMTL(
      flags, device, command_queue,
      MTLShaderLibraryFromFileData(device, shader_libraries_data,
                                   library_label),
      std::move(is_gpu_disabled_sync_switch), pixel_format_override));
  if (!context->IsValid()) {
    FML_LOG(ERROR) << "Could not create Metal context.";
    return nullptr;
  }
  return context;
}

std::shared_ptr<ContextMTL> ContextMTL::Create(
    const Flags& flags,
    id<MTLDevice> device,
    id<MTLCommandQueue> command_queue,
    const std::vector<std::shared_ptr<fml::Mapping>>& shader_libraries_data,
    std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch,
    const std::string& library_label) {
  auto context = std::shared_ptr<ContextMTL>(
      new ContextMTL(flags, device, command_queue,
                     MTLShaderLibraryFromFileData(device, shader_libraries_data,
                                                  library_label),
                     std::move(is_gpu_disabled_sync_switch)));
  if (!context->IsValid()) {
    FML_LOG(ERROR) << "Could not create Metal context.";
    return nullptr;
  }
  return context;
}

ContextMTL::~ContextMTL() {
  is_gpu_disabled_sync_switch_->RemoveObserver(sync_switch_observer_.get());
}

Context::BackendType ContextMTL::GetBackendType() const {
  return Context::BackendType::kMetal;
}

// |Context|
std::string ContextMTL::DescribeGpuModel() const {
  return std::string([[device_ name] UTF8String]);
}

// |Context|
bool ContextMTL::IsValid() const {
  return is_valid_;
}

// |Context|
std::shared_ptr<ShaderLibrary> ContextMTL::GetShaderLibrary() const {
  return shader_library_;
}

// |Context|
std::shared_ptr<PipelineLibrary> ContextMTL::GetPipelineLibrary() const {
  return pipeline_library_;
}

// |Context|
std::shared_ptr<SamplerLibrary> ContextMTL::GetSamplerLibrary() const {
  return sampler_library_;
}

// |Context|
std::shared_ptr<CommandBuffer> ContextMTL::CreateCommandBuffer() const {
  return CreateCommandBufferInQueue(command_queue_);
}

// |Context|
void ContextMTL::Shutdown() {}

#ifdef IMPELLER_DEBUG
std::shared_ptr<GPUTracerMTL> ContextMTL::GetGPUTracer() const {
  return gpu_tracer_;
}
#endif  // IMPELLER_DEBUG

std::shared_ptr<const fml::SyncSwitch> ContextMTL::GetIsGpuDisabledSyncSwitch()
    const {
  return is_gpu_disabled_sync_switch_;
}

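// Wraps the given Metal queue in an Impeller CommandBufferMTL. Returns
// nullptr if the context is invalid or the buffer fails to initialize.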
std::shared_ptr<CommandBuffer> ContextMTL::CreateCommandBufferInQueue(
    id<MTLCommandQueue> queue) const {
  if (!IsValid()) {
    return nullptr;
  }

  auto buffer = std::shared_ptr<CommandBufferMTL>(
      new CommandBufferMTL(weak_from_this(), device_, queue));
  if (!buffer->IsValid()) {
    return nullptr;
  }
  return buffer;
}

std::shared_ptr<Allocator> ContextMTL::GetResourceAllocator() const {
  return resource_allocator_;
}

id<MTLDevice> ContextMTL::GetMTLDevice() const {
  return device_;
}

const std::shared_ptr<const Capabilities>& ContextMTL::GetCapabilities() const {
  return device_capabilities_;
}

void ContextMTL::SetCapabilities(
    const std::shared_ptr<const Capabilities>& capabilities) {
  device_capabilities_ = capabilities;
}

// |Context|
bool ContextMTL::UpdateOffscreenLayerPixelFormat(PixelFormat format) {
  device_capabilities_ = InferMetalCapabilities(device_, format);
  return true;
}

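// Vends a raw MTLCommandBuffer from the context's command queue, optionally
// labeled so it is identifiable in Metal debugging and capture tools.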
id<MTLCommandBuffer> ContextMTL::CreateMTLCommandBuffer(
    const std::string& label) const {
  auto buffer = [command_queue_ commandBuffer];
  if (!label.empty()) {
    [buffer setLabel:@(label.data())];
  }
  return buffer;
}

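// Queues a task to run once GPU access becomes available again. If the
// backlog exceeds kMaxTasksAwaitingGPU, the oldest entries are evicted and
// their failure callbacks are invoked outside the lock.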
void ContextMTL::StoreTaskForGPU(const fml::closure& task,
                                 const fml::closure& failure) {
  std::vector<PendingTasks> failed_tasks;
  {
    Lock lock(tasks_awaiting_gpu_mutex_);
    tasks_awaiting_gpu_.push_back(PendingTasks{task, failure});
    int32_t failed_task_count =
        tasks_awaiting_gpu_.size() - kMaxTasksAwaitingGPU;
    if (failed_task_count > 0) {
      failed_tasks.reserve(failed_task_count);
      failed_tasks.insert(failed_tasks.end(),
                          std::make_move_iterator(tasks_awaiting_gpu_.begin()),
                          std::make_move_iterator(tasks_awaiting_gpu_.begin() +
                                                  failed_task_count));
      tasks_awaiting_gpu_.erase(
          tasks_awaiting_gpu_.begin(),
          tasks_awaiting_gpu_.begin() + failed_task_count);
    }
  }
  for (const PendingTasks& task : failed_tasks) {
    if (task.failure) {
      task.failure();
    }
  }
}

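// Drains the pending-task queue under the lock, then runs each task if the
// GPU is currently available. Tasks that still cannot run (the GPU was
// disabled again in the meantime) are re-queued instead of being dropped.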
void ContextMTL::FlushTasksAwaitingGPU() {
  std::deque<PendingTasks> tasks_awaiting_gpu;
  {
    Lock lock(tasks_awaiting_gpu_mutex_);
    std::swap(tasks_awaiting_gpu, tasks_awaiting_gpu_);
  }
  std::vector<PendingTasks> tasks_to_queue;
  for (const auto& task : tasks_awaiting_gpu) {
    is_gpu_disabled_sync_switch_->Execute(fml::SyncSwitch::Handlers()
                                              .SetIfFalse([&] { task.task(); })
                                              .SetIfTrue([&] {
                                                // Lost access to the GPU
                                                // immediately after it was
                                                // activated. This may happen if
                                                // the app was quickly
                                                // foregrounded/backgrounded
                                                // from a push notification.
                                                // Store the tasks on the
                                                // context again.
                                                tasks_to_queue.push_back(task);
                                              }));
  }
  if (!tasks_to_queue.empty()) {
    Lock lock(tasks_awaiting_gpu_mutex_);
    tasks_awaiting_gpu_.insert(tasks_awaiting_gpu_.end(),
                               tasks_to_queue.begin(), tasks_to_queue.end());
  }
}

ContextMTL::SyncSwitchObserver::SyncSwitchObserver(ContextMTL& parent)
    : parent_(parent) {}

void ContextMTL::SyncSwitchObserver::OnSyncSwitchUpdate(bool new_is_disabled) {
  if (!new_is_disabled) {
    parent_.FlushTasksAwaitingGPU();
  }
}

// |Context|
std::shared_ptr<CommandQueue> ContextMTL::GetCommandQueue() const {
  return command_queue_ip_;
}

// |Context|
RuntimeStageBackend ContextMTL::GetRuntimeStageBackend() const {
  return RuntimeStageBackend::kMetal;
}

#ifdef IMPELLER_DEBUG
const std::shared_ptr<ImpellerMetalCaptureManager>
ContextMTL::GetCaptureManager() const {
  return capture_manager_;
}
#endif  // IMPELLER_DEBUG

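// The capture manager creates its MTLCaptureScope up front (labeled
// "Impeller Frame") but only activates it via StartCapture()/FinishCapture(),
// so Metal frame captures in Xcode can be scoped to a single Impeller frame.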
ImpellerMetalCaptureManager::ImpellerMetalCaptureManager(id<MTLDevice> device) {
  current_capture_scope_ = [[MTLCaptureManager sharedCaptureManager]
      newCaptureScopeWithDevice:device];
  [current_capture_scope_ setLabel:@"Impeller Frame"];
}

bool ImpellerMetalCaptureManager::CaptureScopeActive() const {
  return scope_active_;
}

void ImpellerMetalCaptureManager::StartCapture() {
  if (scope_active_) {
    return;
  }
  scope_active_ = true;
  [current_capture_scope_ beginScope];
}

void ImpellerMetalCaptureManager::FinishCapture() {
  FML_DCHECK(scope_active_);
  [current_capture_scope_ endScope];
  scope_active_ = false;
}

}  // namespace impeller