GrGLBuffer.cpp
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/gl/GrGLBuffer.h"

#include "include/core/SkTraceMemoryDump.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/ganesh/GrGpuResourcePriv.h"
#include "src/gpu/ganesh/gl/GrGLCaps.h"
#include "src/gpu/ganesh/gl/GrGLGpu.h"

#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)

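// GL_ALLOC_CALL wraps a GL allocation call so callers can detect out-of-memory: when
// glCaps().skipErrorChecks() is true it issues the call and reports GR_GL_NO_ERROR; otherwise it
// clears pending errors first, issues the call without the default error check, and returns the
// GL error (if any) produced by the allocation.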
#define GL_ALLOC_CALL(gpu, call)                             \
    [&] {                                                    \
        if (gpu->glCaps().skipErrorChecks()) {               \
            GR_GL_CALL(gpu->glInterface(), call);            \
            return static_cast<GrGLenum>(GR_GL_NO_ERROR);    \
        } else {                                             \
            gpu->clearErrorsAndCheckForOOM();                \
            GR_GL_CALL_NOERRCHECK(gpu->glInterface(), call); \
            return gpu->getErrorAndCheckForOOM();            \
        }                                                    \
    }()

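// Factory: returns nullptr when a transfer buffer is requested but the caps report no transfer
// buffer support, or when the GL buffer object could not be created or its storage allocated.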
sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu,
                                   size_t size,
                                   GrGpuBufferType intendedType,
                                   GrAccessPattern accessPattern) {
    if (gpu->glCaps().transferBufferType() == GrGLCaps::TransferBufferType::kNone &&
        (GrGpuBufferType::kXferCpuToGpu == intendedType ||
         GrGpuBufferType::kXferGpuToCpu == intendedType)) {
        return nullptr;
    }

    sk_sp<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern,
                                            /*label=*/"MakeGlBuffer"));
    if (0 == buffer->bufferID()) {
        return nullptr;
    }
    return buffer;
}

// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW

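// Maps a GrGpuBufferType/GrAccessPattern pair onto the GL <usage> hint passed to glBufferData:
// GPU-to-CPU transfer buffers use the *_READ usages, everything else uses the *_DRAW usages.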
static GrGLenum gr_to_gl_access_pattern(GrGpuBufferType bufferType,
                                        GrAccessPattern accessPattern,
                                        const GrGLCaps& caps) {
    auto drawUsage = [](GrAccessPattern pattern) {
        switch (pattern) {
            case kDynamic_GrAccessPattern:
                // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
                return DYNAMIC_DRAW_PARAM;
            case kStatic_GrAccessPattern:
                return GR_GL_STATIC_DRAW;
            case kStream_GrAccessPattern:
                return GR_GL_STREAM_DRAW;
        }
        SkUNREACHABLE;
    };

    auto readUsage = [](GrAccessPattern pattern) {
        switch (pattern) {
            case kDynamic_GrAccessPattern:
                return GR_GL_DYNAMIC_READ;
            case kStatic_GrAccessPattern:
                return GR_GL_STATIC_READ;
            case kStream_GrAccessPattern:
                return GR_GL_STREAM_READ;
        }
        SkUNREACHABLE;
    };

    auto usageType = [&drawUsage, &readUsage, &caps](GrGpuBufferType type,
                                                     GrAccessPattern pattern) {
        // GL_NV_pixel_buffer_object adds transfer buffers but not the related <usage> values.
        if (caps.transferBufferType() == GrGLCaps::TransferBufferType::kNV_PBO) {
            return drawUsage(pattern);
        }
        switch (type) {
            case GrGpuBufferType::kVertex:
            case GrGpuBufferType::kIndex:
            case GrGpuBufferType::kDrawIndirect:
            case GrGpuBufferType::kXferCpuToGpu:
            case GrGpuBufferType::kUniform:
                return drawUsage(pattern);
            case GrGpuBufferType::kXferGpuToCpu:
                return readUsage(pattern);
        }
        SkUNREACHABLE;
    };

    return usageType(bufferType, accessPattern);
}

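// The constructor creates the GL buffer object and allocates its storage up front with a
// null-data glBufferData; if the allocation fails (e.g. OOM) the buffer ID is deleted and zeroed
// so Make() can report failure.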
GrGLBuffer::GrGLBuffer(GrGLGpu* gpu,
                       size_t size,
                       GrGpuBufferType intendedType,
                       GrAccessPattern accessPattern,
                       std::string_view label)
        : INHERITED(gpu, size, intendedType, accessPattern, label)
        , fIntendedType(intendedType)
        , fBufferID(0)
        , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern, gpu->glCaps()))
        , fHasAttachedToTexture(false) {
    GL_CALL(GenBuffers(1, &fBufferID));
    if (fBufferID) {
        GrGLenum target = gpu->bindBuffer(fIntendedType, this);
        GrGLenum error = GL_ALLOC_CALL(this->glGpu(), BufferData(target,
                                                                 (GrGLsizeiptr)size,
                                                                 nullptr,
                                                                 fUsage));
        if (error != GR_GL_NO_ERROR) {
            GL_CALL(DeleteBuffers(1, &fBufferID));
            fBufferID = 0;
        }
    }
    this->registerWithCache(skgpu::Budgeted::kYes);
    if (!fBufferID) {
        this->resourcePriv().removeScratchKey();
    }
}

inline GrGLGpu* GrGLBuffer::glGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrGLGpu*>(this->getGpu());
}

inline const GrGLCaps& GrGLBuffer::glCaps() const {
    return this->glGpu()->glCaps();
}

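// onRelease deletes the GL object while the context is still valid and then defers to the base
// class; onAbandon simply drops the ID and map pointer, since the GL context is already gone.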
void GrGLBuffer::onRelease() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    if (!this->wasDestroyed()) {
        // make sure we've not been abandoned or already released
        if (fBufferID) {
            GL_CALL(DeleteBuffers(1, &fBufferID));
            fBufferID = 0;
        }
        fMapPtr = nullptr;
    }

    INHERITED::onRelease();
}

void GrGLBuffer::onAbandon() {
    fBufferID = 0;
    fMapPtr = nullptr;
    INHERITED::onAbandon();
}

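// Tells GL that the buffer's previous contents may be discarded, using whichever mechanism the
// caps report: orphaning via a null-data glBufferData, glInvalidateBufferData, or nothing at all.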
[[nodiscard]] static inline GrGLenum invalidate_buffer(GrGLGpu* gpu,
                                                       GrGLenum target,
                                                       GrGLenum usage,
                                                       GrGLuint bufferID,
                                                       size_t bufferSize) {
    switch (gpu->glCaps().invalidateBufferType()) {
        case GrGLCaps::InvalidateBufferType::kNone:
            return GR_GL_NO_ERROR;
        case GrGLCaps::InvalidateBufferType::kNullData:
            return GL_ALLOC_CALL(gpu, BufferData(target, bufferSize, nullptr, usage));
        case GrGLCaps::InvalidateBufferType::kInvalidate:
            GR_GL_CALL(gpu->glInterface(), InvalidateBufferData(bufferID));
            return GR_GL_NO_ERROR;
    }
    SkUNREACHABLE;
}

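// Maps the buffer using whichever entry point the caps expose: glMapBuffer, glMapBufferRange, or
// Chromium's MapBufferSubData. Write-discard maps invalidate the previous contents first where
// the API supports it, so the driver need not preserve them.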
void GrGLBuffer::onMap(MapType type) {
    SkASSERT(fBufferID);
    SkASSERT(!this->wasDestroyed());
    SkASSERT(!this->isMapped());

    // Handling dirty context is done in the bindBuffer call
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            if (type == MapType::kWriteDiscard) {
                GrGLenum error = invalidate_buffer(this->glGpu(),
                                                   target,
                                                   fUsage,
                                                   fBufferID,
                                                   this->size());
                if (error != GR_GL_NO_ERROR) {
                    return;
                }
            }
            GrGLenum access = type == MapType::kRead ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY;
            GL_CALL_RET(fMapPtr, MapBuffer(target, access));
            break;
        }
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            GrGLbitfield access;
            switch (type) {
                case MapType::kRead:
                    access = GR_GL_MAP_READ_BIT;
                    break;
                case MapType::kWriteDiscard:
                    access = GR_GL_MAP_WRITE_BIT | GR_GL_MAP_INVALIDATE_BUFFER_BIT;
                    break;
            }
            GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), access));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            GrGLenum access = type == MapType::kRead ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY;
            GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(), access));
            break;
        }
    }
}

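// Unmapping mirrors onMap: glUnmapBuffer for the MapBuffer/MapBufferRange paths and
// UnmapBufferSubData for the Chromium extension path.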
void GrGLBuffer::onUnmap(MapType) {
    SkASSERT(fBufferID);
    // bind buffer handles the dirty context
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            SkUNREACHABLE;
        case GrGLCaps::kMapBuffer_MapBufferType: // fall through
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            GL_CALL(UnmapBuffer(target));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType:
            this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed?
            GL_CALL(UnmapBufferSubData(fMapPtr));
            break;
    }
    fMapPtr = nullptr;
}

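// Zero-fills the buffer either through a write-discard map plus memset, or by uploading a
// heap-allocated block of zeros via updateData() when mapping is unavailable.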
bool GrGLBuffer::onClearToZero() {
    SkASSERT(fBufferID);

    // We could improve this on GL 4.3+ with glClearBufferData (also GL_ARB_clear_buffer_object).
    this->onMap(MapType::kWriteDiscard);
    if (fMapPtr) {
        std::memset(fMapPtr, 0, this->size());
        this->onUnmap(MapType::kWriteDiscard);
        return true;
    }

    void* zeros = sk_calloc_throw(this->size());
    bool result = this->updateData(zeros, 0, this->size(), /*preserve=*/false);
    sk_free(zeros);
    return result;
}

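// Uploads 'size' bytes at 'offset' with glBufferSubData; when the caller does not need the old
// contents preserved, the whole buffer is invalidated first so the driver need not retain them.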
bool GrGLBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool preserve) {
    SkASSERT(fBufferID);

    // bindbuffer handles dirty context
    GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
    if (!preserve) {
        GrGLenum error = invalidate_buffer(this->glGpu(), target, fUsage, fBufferID, this->size());
        if (error != GR_GL_NO_ERROR) {
            return false;
        }
    }
    GL_CALL(BufferSubData(target, offset, size, src));
    return true;
}

void GrGLBuffer::onSetLabel() {
    SkASSERT(fBufferID);
    if (!this->getLabel().empty()) {
        const std::string label = "_Skia_" + this->getLabel();
        if (this->glGpu()->glCaps().debugSupport()) {
            GL_CALL(ObjectLabel(GR_GL_BUFFER, fBufferID, -1, label.c_str()));
        }
    }
}

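// Reports the GL buffer ID as the backing object for memory tracing dumps.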
void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
                                  const SkString& dumpName) const {
    SkString buffer_id;
    buffer_id.appendU32(this->bufferID());
    traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer", buffer_id.c_str());
}