MtlComputeCommandEncoder.h

/*
 * Copyright 2022 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_graphite_MtlComputeCommandEncoder_DEFINED
#define skgpu_graphite_MtlComputeCommandEncoder_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/ports/SkCFObject.h"
#include "src/gpu/graphite/ComputeTypes.h"
#include "src/gpu/graphite/Resource.h"

#import <Metal/Metal.h>

namespace skgpu::graphite {

/**
 * Wraps a MTLComputeCommandEncoder object and associated tracked state.
 */
class MtlComputeCommandEncoder : public Resource {
public:
    static sk_sp<MtlComputeCommandEncoder> Make(const SharedContext* sharedContext,
                                                id<MTLCommandBuffer> commandBuffer) {
        // Inserting a pool here so the autorelease occurs when we return and the
        // only remaining ref is the retain below.
        @autoreleasepool {
            // Adding a retain here to keep our own ref separate from the autorelease pool
            sk_cfp<id<MTLComputeCommandEncoder>> encoder =
                    sk_ret_cfp([commandBuffer computeCommandEncoder]);

            // TODO(armansito): Support concurrent dispatch of compute passes using
            // MTLDispatchTypeConcurrent on macOS 10.14+ and iOS 12.0+.
            return sk_sp<MtlComputeCommandEncoder>(
                    new MtlComputeCommandEncoder(sharedContext, std::move(encoder)));
        }
    }
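
    // A minimal usage sketch (hypothetical call sites; assumes WorkgroupSize is
    // constructible from width/height/depth, matching the fWidth/fHeight/fDepth
    // fields used below):
    //
    //   sk_sp<MtlComputeCommandEncoder> encoder =
    //           MtlComputeCommandEncoder::Make(sharedContext, cmdBuffer);
    //   encoder->setComputePipelineState(pso);
    //   encoder->setBuffer(inputBuffer, /*offset=*/0, /*index=*/0);
    //   encoder->dispatchThreadgroups(WorkgroupSize(8, 8, 1), WorkgroupSize(16, 16, 1));
    //   encoder->endEncoding();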

    const char* getResourceType() const override { return "Metal Compute Command Encoder"; }

    void setLabel(NSString* label) { [(*fCommandEncoder) setLabel:label]; }

    void pushDebugGroup(NSString* string) { [(*fCommandEncoder) pushDebugGroup:string]; }
    void popDebugGroup() { [(*fCommandEncoder) popDebugGroup]; }
    void insertDebugSignpost(NSString* string) { [(*fCommandEncoder) insertDebugSignpost:string]; }

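    // The setters below mirror the most recently bound pipeline, buffers, textures,
    // and samplers in member state so that a binding identical to the current one can
    // be skipped rather than re-issued to the Metal encoder.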
    void setComputePipelineState(id<MTLComputePipelineState> pso) {
        if (fCurrentComputePipelineState != pso) {
            [(*fCommandEncoder) setComputePipelineState:pso];
            fCurrentComputePipelineState = pso;
        }
    }

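    // Buffer bindings are cached per slot: when the same buffer is re-bound at a new
    // offset, only the offset is updated via -setBufferOffset:atIndex: (macOS 10.11+,
    // iOS 8.3+, tvOS 9.0+), which avoids rebinding the whole buffer.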
    void setBuffer(id<MTLBuffer> buffer, NSUInteger offset, NSUInteger index) {
        SkASSERT(buffer != nil);
        SkASSERT(index < kMaxExpectedBuffers);
        if (@available(macOS 10.11, iOS 8.3, tvOS 9.0, *)) {
            if (fBuffers[index] == buffer) {
                this->setBufferOffset(offset, index);
                return;
            }
        }
        if (fBuffers[index] != buffer || fBufferOffsets[index] != offset) {
            [(*fCommandEncoder) setBuffer:buffer offset:offset atIndex:index];
            fBuffers[index] = buffer;
            fBufferOffsets[index] = offset;
        }
    }

    void setBufferOffset(NSUInteger offset, NSUInteger index)
            SK_API_AVAILABLE(macos(10.11), ios(8.3), tvos(9.0)) {
        SkASSERT(index < kMaxExpectedBuffers);
        if (fBufferOffsets[index] != offset) {
            [(*fCommandEncoder) setBufferOffset:offset atIndex:index];
            fBufferOffsets[index] = offset;
        }
    }

    void setTexture(id<MTLTexture> texture, NSUInteger index) {
        SkASSERT(index < kMaxExpectedTextures);
        if (fTextures[index] != texture) {
            [(*fCommandEncoder) setTexture:texture atIndex:index];
            fTextures[index] = texture;
        }
    }

    void setSamplerState(id<MTLSamplerState> sampler, NSUInteger index) {
        SkASSERT(index < kMaxExpectedTextures);
        if (fSamplers[index] != sampler) {
            [(*fCommandEncoder) setSamplerState:sampler atIndex:index];
            fSamplers[index] = sampler;
        }
    }

    // `length` must be 16-byte aligned (Metal requires threadgroup memory lengths to
    // be multiples of 16 bytes).
    void setThreadgroupMemoryLength(NSUInteger length, NSUInteger index) {
        SkASSERT(length % 16 == 0);
        [(*fCommandEncoder) setThreadgroupMemoryLength:length atIndex:index];
    }

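    // `globalSize` here is the threadgroup count per grid dimension, not a thread
    // count; the total number of threads launched in each dimension is
    // globalSize * localSize.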
    void dispatchThreadgroups(const WorkgroupSize& globalSize, const WorkgroupSize& localSize) {
        MTLSize threadgroupCount =
                MTLSizeMake(globalSize.fWidth, globalSize.fHeight, globalSize.fDepth);
        MTLSize threadsPerThreadgroup =
                MTLSizeMake(localSize.fWidth, localSize.fHeight, localSize.fDepth);
        [(*fCommandEncoder) dispatchThreadgroups:threadgroupCount
                           threadsPerThreadgroup:threadsPerThreadgroup];
    }

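    // For an indirect dispatch, Metal reads the threadgroup counts at `offset` within
    // `indirectBuffer`, laid out as MTLDispatchThreadgroupsIndirectArguments (three
    // contiguous uint32_t values).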
    void dispatchThreadgroupsWithIndirectBuffer(id<MTLBuffer> indirectBuffer,
                                                NSUInteger offset,
                                                const WorkgroupSize& localSize) {
        MTLSize threadsPerThreadgroup =
                MTLSizeMake(localSize.fWidth, localSize.fHeight, localSize.fDepth);
        [(*fCommandEncoder) dispatchThreadgroupsWithIndirectBuffer:indirectBuffer
                                              indirectBufferOffset:offset
                                             threadsPerThreadgroup:threadsPerThreadgroup];
    }

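    // Encoding must be ended before the owning MTLCommandBuffer is committed; the
    // encoder must not record further commands afterwards.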
    void endEncoding() { [(*fCommandEncoder) endEncoding]; }

private:
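    // Upper bounds on the binding slots this wrapper tracks (and asserts against).
    // Metal's own buffer argument table has 31 slots, so these are Skia-side
    // expectations rather than API limits.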
    inline static constexpr int kMaxExpectedBuffers = 16;
    inline static constexpr int kMaxExpectedTextures = 16;

    MtlComputeCommandEncoder(const SharedContext* sharedContext,
                             sk_cfp<id<MTLComputeCommandEncoder>> encoder)
            : Resource(sharedContext,
                       Ownership::kOwned,
                       skgpu::Budgeted::kYes,
                       /*gpuMemorySize=*/0)
            , fCommandEncoder(std::move(encoder)) {
        for (int i = 0; i < kMaxExpectedBuffers; i++) {
            fBuffers[i] = nil;
        }
        for (int i = 0; i < kMaxExpectedTextures; i++) {
            fTextures[i] = nil;
            fSamplers[i] = nil;
        }
    }

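    // Invoked by the Resource lifecycle when this object is freed; resetting the
    // sk_cfp releases the retain taken in Make().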
    void freeGpuData() override { fCommandEncoder.reset(); }

    sk_cfp<id<MTLComputeCommandEncoder>> fCommandEncoder;

    id<MTLComputePipelineState> fCurrentComputePipelineState = nil;

    id<MTLBuffer> fBuffers[kMaxExpectedBuffers];
    NSUInteger fBufferOffsets[kMaxExpectedBuffers];

    id<MTLTexture> fTextures[kMaxExpectedTextures];
    id<MTLSamplerState> fSamplers[kMaxExpectedTextures];
};

} // namespace skgpu::graphite

#endif // skgpu_graphite_MtlComputeCommandEncoder_DEFINED