Flutter Engine
 
context_mtl.mm
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <Metal/Metal.h>

#include <memory>

#include "flutter/fml/file.h"
#include "flutter/fml/logging.h"
#include "flutter/fml/paths.h"

namespace impeller {

static bool DeviceSupportsFramebufferFetch(id<MTLDevice> device) {
#if FML_OS_IOS_SIMULATOR
  // The iOS simulator lies about supporting framebuffer fetch.
  return false;
#else  // FML_OS_IOS_SIMULATOR
  // According to
  // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf, Apple2
  // corresponds to iOS GPU family 2, which supports A8 devices.
  return [device supportsFamily:MTLGPUFamilyApple2];
#endif  // FML_OS_IOS_SIMULATOR
}

static bool DeviceSupportsComputeSubgroups(id<MTLDevice> device) {
  // Refer to the "SIMD-scoped reduction operations" feature in the table
  // below: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
  return [device supportsFamily:MTLGPUFamilyApple7] ||
         [device supportsFamily:MTLGPUFamilyMac2];
}

// See "Extended Range and wide color pixel formats" in the metal feature set
// tables.
static bool DeviceSupportsExtendedRangeFormats(id<MTLDevice> device) {
  return [device supportsFamily:MTLGPUFamilyApple3];
}

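// Sets up the Metal-backed Impeller context: validates the device, registers
// an observer on the GPU-availability sync switch, and creates the shader,
// pipeline, and sampler libraries plus the resource allocator. If any step
// fails, the constructor returns early and IsValid() stays false.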
ContextMTL::ContextMTL(
    const Flags& flags,
    id<MTLDevice> device,
    id<MTLCommandQueue> command_queue,
    NSArray<id<MTLLibrary>>* shader_libraries,
    std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch,
    std::optional<PixelFormat> pixel_format_override)
    : Context(flags),
      device_(device),
      command_queue_(command_queue),
      is_gpu_disabled_sync_switch_(std::move(is_gpu_disabled_sync_switch)) {
  // Validate device.
  if (!device_) {
    VALIDATION_LOG << "Could not set up valid Metal device.";
    return;
  }

  sync_switch_observer_.reset(new SyncSwitchObserver(*this));
  is_gpu_disabled_sync_switch_->AddObserver(sync_switch_observer_.get());

  // Setup the shader library.
  {
    if (shader_libraries == nil) {
      VALIDATION_LOG << "Shader libraries were null.";
      return;
    }

    // std::make_shared disallowed because of private friend ctor.
    auto library = std::shared_ptr<ShaderLibraryMTL>(
        new ShaderLibraryMTL(shader_libraries));
    if (!library->IsValid()) {
      VALIDATION_LOG << "Could not create valid Metal shader library.";
      return;
    }
    shader_library_ = std::move(library);
  }

  // Setup the pipeline library.
  {
    pipeline_library_ =
        std::shared_ptr<PipelineLibraryMTL>(new PipelineLibraryMTL(device_));
  }

  // Setup the sampler library.
  {
    sampler_library_ =
        std::shared_ptr<SamplerLibraryMTL>(new SamplerLibraryMTL(device_));
  }

  // Setup the resource allocator.
  {
    resource_allocator_ = std::shared_ptr<AllocatorMTL>(
        new AllocatorMTL(device_, "Impeller Permanents Allocator"));
    if (!resource_allocator_) {
      VALIDATION_LOG << "Could not set up the resource allocator.";
      return;
    }
  }

  device_capabilities_ =
      InferMetalCapabilities(device_, pixel_format_override.has_value()
                                          ? pixel_format_override.value()
                                          : PixelFormat::kB8G8R8A8UNormInt);
  command_queue_ip_ = std::make_shared<CommandQueue>();
#ifdef IMPELLER_DEBUG
  gpu_tracer_ = std::make_shared<GPUTracerMTL>();
  capture_manager_ = std::make_shared<ImpellerMetalCaptureManager>(device_);
#endif  // IMPELLER_DEBUG
  is_valid_ = true;
}

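// Loads Metal shader libraries (.metallib files) from the given file paths.
// Returns nil if any path does not exist or any library fails to load.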
static NSArray<id<MTLLibrary>>* MTLShaderLibraryFromFilePaths(
    id<MTLDevice> device,
    const std::vector<std::string>& libraries_paths) {
  NSMutableArray<id<MTLLibrary>>* found_libraries = [NSMutableArray array];
  for (const auto& library_path : libraries_paths) {
    if (!fml::IsFile(library_path)) {
      VALIDATION_LOG << "Shader library does not exist at path '"
                     << library_path << "'";
      return nil;
    }
    NSError* shader_library_error = nil;
    auto library = [device newLibraryWithFile:@(library_path.c_str())
                                        error:&shader_library_error];
    if (!library) {
      FML_LOG(ERROR) << "Could not create shader library: "
                     << shader_library_error.localizedDescription.UTF8String;
      return nil;
    }
    [found_libraries addObject:library];
  }
  return found_libraries;
}

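// Creates Metal shader libraries from in-memory mappings. Each mapping is
// wrapped in a dispatch_data_t whose destructor block keeps a reference to
// the mapping, so the backing memory stays alive until Metal releases the
// data. Returns nil on any failure.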
static NSArray<id<MTLLibrary>>* MTLShaderLibraryFromFileData(
    id<MTLDevice> device,
    const std::vector<std::shared_ptr<fml::Mapping>>& libraries_data,
    const std::string& label) {
  NSMutableArray<id<MTLLibrary>>* found_libraries = [NSMutableArray array];
  for (const auto& library_data : libraries_data) {
    if (library_data == nullptr) {
      FML_LOG(ERROR) << "Shader library data was null.";
      return nil;
    }

    __block auto data = library_data;

    auto dispatch_data =
        ::dispatch_data_create(library_data->GetMapping(),  // buffer
                               library_data->GetSize(),     // size
                               dispatch_get_main_queue(),   // queue
                               ^() {
                                 // We just need a reference.
                                 data.reset();
                               }  // destructor
        );
    if (!dispatch_data) {
      FML_LOG(ERROR) << "Could not wrap shader data in dispatch data.";
      return nil;
    }

    NSError* shader_library_error = nil;
    auto library = [device newLibraryWithData:dispatch_data
                                        error:&shader_library_error];
    if (!library) {
      FML_LOG(ERROR) << "Could not create shader library: "
                     << shader_library_error.localizedDescription.UTF8String;
      return nil;
    }
    if (!label.empty()) {
      library.label = @(label.c_str());
    }
    [found_libraries addObject:library];
  }
  return found_libraries;
}

static id<MTLDevice> CreateMetalDevice() {
  return ::MTLCreateSystemDefaultDevice();
}

static id<MTLCommandQueue> CreateMetalCommandQueue(id<MTLDevice> device) {
  auto command_queue = device.newCommandQueue;
  if (!command_queue) {
    VALIDATION_LOG << "Could not set up the command queue.";
    return nullptr;
  }
  command_queue.label = @"Impeller Command Queue";
  return command_queue;
}

std::shared_ptr<ContextMTL> ContextMTL::Create(
    const Flags& flags,
    const std::vector<std::string>& shader_library_paths,
    std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch) {
  auto device = CreateMetalDevice();
  auto command_queue = CreateMetalCommandQueue(device);
  if (!command_queue) {
    return nullptr;
  }
  auto context = std::shared_ptr<ContextMTL>(new ContextMTL(
      flags, device, command_queue,
      MTLShaderLibraryFromFilePaths(device, shader_library_paths),
      std::move(is_gpu_disabled_sync_switch)));
  if (!context->IsValid()) {
    FML_LOG(ERROR) << "Could not create Metal context.";
    return nullptr;
  }
  return context;
}
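
// The path-based Create overload above might be used roughly like this
// (hypothetical caller; the library path and sync-switch setup are
// illustrative, not taken from this file):
//
//   auto is_gpu_disabled = std::make_shared<fml::SyncSwitch>(false);
//   auto context = ContextMTL::Create(Context::Flags{},
//                                     {"path/to/shaders.metallib"},
//                                     is_gpu_disabled);
//   if (!context) { /* fall back to another backend or abort */ }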

std::shared_ptr<ContextMTL> ContextMTL::Create(
    const Flags& flags,
    const std::vector<std::shared_ptr<fml::Mapping>>& shader_libraries_data,
    std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch,
    const std::string& library_label,
    std::optional<PixelFormat> pixel_format_override) {
  auto device = CreateMetalDevice();
  auto command_queue = CreateMetalCommandQueue(device);
  if (!command_queue) {
    return nullptr;
  }
  auto context = std::shared_ptr<ContextMTL>(new ContextMTL(
      flags, device, command_queue,
      MTLShaderLibraryFromFileData(device, shader_libraries_data,
                                   library_label),
      std::move(is_gpu_disabled_sync_switch), pixel_format_override));
  if (!context->IsValid()) {
    FML_LOG(ERROR) << "Could not create Metal context.";
    return nullptr;
  }
  return context;
}

std::shared_ptr<ContextMTL> ContextMTL::Create(
    const Flags& flags,
    id<MTLDevice> device,
    id<MTLCommandQueue> command_queue,
    const std::vector<std::shared_ptr<fml::Mapping>>& shader_libraries_data,
    std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch,
    const std::string& library_label) {
  auto context = std::shared_ptr<ContextMTL>(
      new ContextMTL(flags, device, command_queue,
                     MTLShaderLibraryFromFileData(device, shader_libraries_data,
                                                  library_label),
                     std::move(is_gpu_disabled_sync_switch)));
  if (!context->IsValid()) {
    FML_LOG(ERROR) << "Could not create Metal context.";
    return nullptr;
  }
  return context;
}

ContextMTL::~ContextMTL() {
  is_gpu_disabled_sync_switch_->RemoveObserver(sync_switch_observer_.get());
}

Context::BackendType ContextMTL::GetBackendType() const {
  return Context::BackendType::kMetal;
}

// |Context|
std::string ContextMTL::DescribeGpuModel() const {
  return std::string([[device_ name] UTF8String]);
}

// |Context|
bool ContextMTL::IsValid() const {
  return is_valid_;
}

// |Context|
std::shared_ptr<ShaderLibrary> ContextMTL::GetShaderLibrary() const {
  return shader_library_;
}

// |Context|
std::shared_ptr<PipelineLibrary> ContextMTL::GetPipelineLibrary() const {
  return pipeline_library_;
}

// |Context|
std::shared_ptr<SamplerLibrary> ContextMTL::GetSamplerLibrary() const {
  return sampler_library_;
}

// |Context|
std::shared_ptr<CommandBuffer> ContextMTL::CreateCommandBuffer() const {
  return CreateCommandBufferInQueue(command_queue_);
}

// |Context|
void ContextMTL::Shutdown() {}

#ifdef IMPELLER_DEBUG
std::shared_ptr<GPUTracerMTL> ContextMTL::GetGPUTracer() const {
  return gpu_tracer_;
}
#endif  // IMPELLER_DEBUG

std::shared_ptr<const fml::SyncSwitch> ContextMTL::GetIsGpuDisabledSyncSwitch()
    const {
  return is_gpu_disabled_sync_switch_;
}

std::shared_ptr<CommandBuffer> ContextMTL::CreateCommandBufferInQueue(
    id<MTLCommandQueue> queue) const {
  if (!IsValid()) {
    return nullptr;
  }

  auto buffer = std::shared_ptr<CommandBufferMTL>(
      new CommandBufferMTL(weak_from_this(), device_, queue));
  if (!buffer->IsValid()) {
    return nullptr;
  }
  return buffer;
}

std::shared_ptr<Allocator> ContextMTL::GetResourceAllocator() const {
  return resource_allocator_;
}

id<MTLDevice> ContextMTL::GetMTLDevice() const {
  return device_;
}

const std::shared_ptr<const Capabilities>& ContextMTL::GetCapabilities() const {
  return device_capabilities_;
}

void ContextMTL::SetCapabilities(
    const std::shared_ptr<const Capabilities>& capabilities) {
  device_capabilities_ = capabilities;
}

// |Context|
bool ContextMTL::UpdateOffscreenLayerPixelFormat(PixelFormat format) {
  device_capabilities_ = InferMetalCapabilities(device_, format);
  return true;
}

id<MTLCommandBuffer> ContextMTL::CreateMTLCommandBuffer(
    const std::string& label) const {
  auto buffer = [command_queue_ commandBuffer];
  if (!label.empty()) {
    [buffer setLabel:@(label.data())];
  }
  return buffer;
}

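// Queues |task| to run once GPU access is restored (see
// FlushTasksAwaitingGPU). If the backlog grows beyond kMaxTasksAwaitingGPU,
// the oldest entries are dropped and their |failure| callbacks are invoked
// instead.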
void ContextMTL::StoreTaskForGPU(const fml::closure& task,
                                 const fml::closure& failure) {
  std::vector<PendingTasks> failed_tasks;
  {
    Lock lock(tasks_awaiting_gpu_mutex_);
    tasks_awaiting_gpu_.push_back(PendingTasks{task, failure});
    int32_t failed_task_count =
        tasks_awaiting_gpu_.size() - kMaxTasksAwaitingGPU;
    if (failed_task_count > 0) {
      failed_tasks.reserve(failed_task_count);
      failed_tasks.insert(failed_tasks.end(),
                          std::make_move_iterator(tasks_awaiting_gpu_.begin()),
                          std::make_move_iterator(tasks_awaiting_gpu_.begin() +
                                                  failed_task_count));
      tasks_awaiting_gpu_.erase(
          tasks_awaiting_gpu_.begin(),
          tasks_awaiting_gpu_.begin() + failed_task_count);
    }
  }
  for (const PendingTasks& task : failed_tasks) {
    if (task.failure) {
      task.failure();
    }
  }
}

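// Runs all tasks that were queued while the GPU was unavailable. Each task is
// executed only if the GPU is still accessible; tasks that race with the GPU
// being disabled again are placed back on the queue.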
void ContextMTL::FlushTasksAwaitingGPU() {
  std::deque<PendingTasks> tasks_awaiting_gpu;
  {
    Lock lock(tasks_awaiting_gpu_mutex_);
    std::swap(tasks_awaiting_gpu, tasks_awaiting_gpu_);
  }
  std::vector<PendingTasks> tasks_to_queue;
  for (const auto& task : tasks_awaiting_gpu) {
    is_gpu_disabled_sync_switch_->Execute(fml::SyncSwitch::Handlers()
                                              .SetIfFalse([&] { task.task(); })
                                              .SetIfTrue([&] {
                                                // Lost access to the GPU
                                                // immediately after it was
                                                // activated. This may happen if
                                                // the app was quickly
                                                // foregrounded/backgrounded
                                                // from a push notification.
                                                // Store the tasks on the
                                                // context again.
                                                tasks_to_queue.push_back(task);
                                              }));
  }
  if (!tasks_to_queue.empty()) {
    Lock lock(tasks_awaiting_gpu_mutex_);
    tasks_awaiting_gpu_.insert(tasks_awaiting_gpu_.end(),
                               tasks_to_queue.begin(), tasks_to_queue.end());
  }
}

ContextMTL::SyncSwitchObserver::SyncSwitchObserver(ContextMTL& parent)
    : parent_(parent) {}

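// When the sync switch flips back to "GPU enabled", drain any tasks that were
// queued while the GPU was unavailable.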
void ContextMTL::SyncSwitchObserver::OnSyncSwitchUpdate(bool new_is_disabled) {
  if (!new_is_disabled) {
    parent_.FlushTasksAwaitingGPU();
  }
}

// |Context|
std::shared_ptr<CommandQueue> ContextMTL::GetCommandQueue() const {
  return command_queue_ip_;
}

// |Context|
RuntimeStageBackend ContextMTL::GetRuntimeStageBackend() const {
  return RuntimeStageBackend::kMetal;
}

#ifdef IMPELLER_DEBUG
const std::shared_ptr<ImpellerMetalCaptureManager>
ContextMTL::GetCaptureManager() const {
  return capture_manager_;
}
#endif  // IMPELLER_DEBUG
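
// Creates a Metal capture scope, labeled "Impeller Frame", that GPU frame
// capture tooling (e.g. Xcode) can use to delimit a single Impeller frame.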
ImpellerMetalCaptureManager::ImpellerMetalCaptureManager(id<MTLDevice> device) {
  current_capture_scope_ = [[MTLCaptureManager sharedCaptureManager]
      newCaptureScopeWithDevice:device];
  [current_capture_scope_ setLabel:@"Impeller Frame"];
}

bool ImpellerMetalCaptureManager::CaptureScopeActive() const {
  return scope_active_;
}

void ImpellerMetalCaptureManager::StartCapture() {
  if (scope_active_) {
    return;
  }
  scope_active_ = true;
  [current_capture_scope_ beginScope];
}

void ImpellerMetalCaptureManager::FinishCapture() {
  FML_DCHECK(scope_active_);
  [current_capture_scope_ endScope];
  scope_active_ = false;
}
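
// Hypothetical usage sketch (not taken from this file): in IMPELLER_DEBUG
// builds, bracket the encoding of one frame so it appears as a single capture
// scope in the GPU frame capture tool.
//
//   context->GetCaptureManager()->StartCapture();
//   // ... encode and submit the frame's command buffers ...
//   context->GetCaptureManager()->FinishCapture();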

}  // namespace impeller