GrMtlBuffer.mm
/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

GR_NORETAIN_BEGIN

#ifdef SK_ENABLE_MTL_DEBUG_INFO
NSString* kBufferTypeNames[kGrGpuBufferTypeCount] = {
    @"Vertex",
    @"Index",
    @"Indirect",
    @"Xfer CPU to GPU",
    @"Xfer GPU to CPU",
    @"Uniform",
};
#endif

sk_sp<GrMtlBuffer> GrMtlBuffer::Make(GrMtlGpu* gpu,
                                     size_t size,
                                     GrGpuBufferType intendedType,
                                     GrAccessPattern accessPattern) {
    return sk_sp<GrMtlBuffer>(new GrMtlBuffer(gpu,
                                              size,
                                              intendedType,
                                              accessPattern,
                                              /*label=*/"MakeMtlBuffer"));
}

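// Dynamic (CPU-updatable) buffers are allocated with Managed or Shared storage so
// the CPU can write to them directly; static buffers get Private (GPU-only)
// storage and are updated through a staging buffer and a blit (see onUpdateData).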
GrMtlBuffer::GrMtlBuffer(GrMtlGpu* gpu, size_t size, GrGpuBufferType intendedType,
                         GrAccessPattern accessPattern, std::string_view label)
        : INHERITED(gpu, size, intendedType, accessPattern, label)
        , fIsDynamic(accessPattern != kStatic_GrAccessPattern) {
    NSUInteger options = 0;
    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        if (fIsDynamic) {
#ifdef SK_BUILD_FOR_MAC
            if (gpu->mtlCaps().isMac()) {
                options |= MTLResourceStorageModeManaged;
            } else {
                options |= MTLResourceStorageModeShared;
            }
#else
            options |= MTLResourceStorageModeShared;
#endif
        } else {
            options |= MTLResourceStorageModePrivate;
        }
    }

    fMtlBuffer = size == 0 ? nil :
            [gpu->device() newBufferWithLength: size
                                       options: options];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    fMtlBuffer.label = kBufferTypeNames[(int)intendedType];
#endif
    this->registerWithCache(skgpu::Budgeted::kYes);
    VALIDATE();
}

GrMtlBuffer::~GrMtlBuffer() {
    SkASSERT(!fMtlBuffer);
    SkASSERT(!fMapPtr);
}

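// Two update paths: dynamic buffers are written directly through the mapped
// contents pointer, while static (Private-storage) buffers receive the data via a
// staging-buffer slice that is blit-copied into place, padded as needed to satisfy
// the transfer alignment.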
bool GrMtlBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool preserve) {
    if (fIsDynamic) {
        this->internalMap();
        if (!fMapPtr) {
            return false;
        }
        memcpy(SkTAddOffset<void>(fMapPtr, offset), src, size);
        this->internalUnmap(offset, size);
        return true;
    }
    // Update via transfer buffer.

    // We have to respect the transfer alignment. So we may transfer some extra bytes before and
    // after the region to be updated.
    size_t transferAlignment = this->getGpu()->caps()->transferFromBufferToBufferAlignment();
    size_t r = offset % transferAlignment;
    SkASSERT(!preserve || r == 0);  // We can't push extra bytes when preserving.

    offset -= r;
    size_t transferSize = SkAlignTo(size + r, transferAlignment);

    GrStagingBufferManager::Slice slice;
    slice = this->mtlGpu()->stagingBufferManager()->allocateStagingBufferSlice(
            transferSize, this->mtlGpu()->mtlCaps().getMinBufferAlignment());
    if (!slice.fBuffer) {
        return false;
    }
    memcpy(SkTAddOffset<void>(slice.fOffsetMapPtr, r), src, size);

    GrMtlCommandBuffer* cmdBuffer = this->mtlGpu()->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
    id<MTLBuffer> transferBuffer = mtlBuffer->mtlBuffer();
    [blitCmdEncoder copyFromBuffer: transferBuffer
                      sourceOffset: slice.fOffset
                          toBuffer: fMtlBuffer
                 destinationOffset: offset
                              size: transferSize];
    return true;
}

inline GrMtlGpu* GrMtlBuffer::mtlGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrMtlGpu*>(this->getGpu());
}

void GrMtlBuffer::onAbandon() {
    fMtlBuffer = nil;
    fMapPtr = nullptr;
    VALIDATE();
    INHERITED::onAbandon();
}

void GrMtlBuffer::onRelease() {
    if (!this->wasDestroyed()) {
        VALIDATE();
        fMtlBuffer = nil;
        fMapPtr = nullptr;
        VALIDATE();
    }
    INHERITED::onRelease();
}

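// Mapping is only meaningful for dynamic buffers, whose MTLBuffer is
// CPU-addressable; it simply exposes the buffer's contents pointer.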
void GrMtlBuffer::internalMap() {
    if (fIsDynamic) {
        VALIDATE();
        SkASSERT(!this->isMapped());
        fMapPtr = static_cast<char*>(fMtlBuffer.contents);
        VALIDATE();
    }
}

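// With Managed storage on macOS, CPU writes must be flushed to the GPU copy via
// didModifyRange:; Shared storage needs no explicit flush.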
void GrMtlBuffer::internalUnmap(size_t writtenOffset, size_t writtenSize) {
    SkASSERT(fMtlBuffer);
    if (fIsDynamic) {
        VALIDATE();
        SkASSERT(writtenOffset + writtenSize <= this->size());
        SkASSERT(this->isMapped());
#ifdef SK_BUILD_FOR_MAC
        if (this->mtlGpu()->mtlCaps().isMac() && writtenSize) {
            // We should never write to this type of buffer on the CPU.
            SkASSERT(this->intendedType() != GrGpuBufferType::kXferGpuToCpu);
            [fMtlBuffer didModifyRange: NSMakeRange(writtenOffset, writtenSize)];
        }
#endif
        fMapPtr = nullptr;
    }
}

void GrMtlBuffer::onMap(MapType) {
    this->internalMap();
}

void GrMtlBuffer::onUnmap(MapType type) {
    this->internalUnmap(0, type == MapType::kWriteDiscard ? this->size() : 0);
}

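// Zeroes the buffer on the GPU timeline with a blit fill; the command buffer keeps
// a ref to this GrBuffer so it stays alive until that work completes.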
bool GrMtlBuffer::onClearToZero() {
    SkASSERT(fMtlBuffer);
    GrMtlCommandBuffer* cmdBuffer = this->mtlGpu()->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

    NSRange range{0, this->size()};
    [blitCmdEncoder fillBuffer: fMtlBuffer range: range value: 0];

    cmdBuffer->addGrBuffer(sk_ref_sp(this));

    return true;
}

#ifdef SK_DEBUG
void GrMtlBuffer::validate() const {
    SkASSERT(fMtlBuffer == nil ||
             this->intendedType() == GrGpuBufferType::kVertex ||
             this->intendedType() == GrGpuBufferType::kIndex ||
             this->intendedType() == GrGpuBufferType::kDrawIndirect ||
             this->intendedType() == GrGpuBufferType::kXferCpuToGpu ||
             this->intendedType() == GrGpuBufferType::kXferGpuToCpu ||
             this->intendedType() == GrGpuBufferType::kUniform);
    SkASSERT((fMapPtr && fMtlBuffer) || !fMapPtr);
}
#endif

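// Mirrors the resource's label onto the MTLBuffer (prefixed with "_Skia_") so it
// shows up in Metal debugging tools such as GPU frame captures.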
void GrMtlBuffer::onSetLabel() {
    SkASSERT(fMtlBuffer);
    if (!this->getLabel().empty()) {
        NSString* labelStr = @(this->getLabel().c_str());
        fMtlBuffer.label = [@"_Skia_" stringByAppendingString:labelStr];
    }
}

GR_NORETAIN_END
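
The sketch below is not part of GrMtlBuffer.mm; it is a minimal illustration of how the Make() factory above might be driven, assuming a valid GrMtlGpu* supplied by the surrounding Ganesh backend (in the engine, buffer creation normally goes through the GrMtlGpu backend rather than direct calls like this). The helper name MakeExampleBuffers is hypothetical.

// Illustrative sketch only; MakeExampleBuffers is a hypothetical helper and
// `gpu` is assumed to be a valid GrMtlGpu* owned by the Ganesh backend.
static sk_sp<GrMtlBuffer> MakeExampleBuffers(GrMtlGpu* gpu) {
    // Dynamic vertex buffer: Shared/Managed storage, so updates are a direct
    // memcpy through the mapped contents pointer (the fIsDynamic path above).
    sk_sp<GrMtlBuffer> vertexBuffer = GrMtlBuffer::Make(gpu,
                                                        256 * sizeof(float),
                                                        GrGpuBufferType::kVertex,
                                                        kDynamic_GrAccessPattern);

    // Static uniform buffer: Private storage, so updates go through a staging
    // slice and a blit copy instead.
    sk_sp<GrMtlBuffer> uniformBuffer = GrMtlBuffer::Make(gpu,
                                                         1024,
                                                         GrGpuBufferType::kUniform,
                                                         kStatic_GrAccessPattern);

    // fMtlBuffer may be nil if the Metal allocation failed (or if size were zero).
    if (!vertexBuffer->mtlBuffer() || !uniformBuffer->mtlBuffer()) {
        return nullptr;
    }
    return vertexBuffer;
}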