GrBufferAllocPool.cpp
/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/GrBufferAllocPool.h"

#include "src/base/SkSafeMath.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrCpuBuffer.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrGpuBuffer.h"
#include "src/gpu/ganesh/GrResourceProvider.h"

#include <memory>

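// CpuBufferCache holds up to maxBuffersToCache recycled CPU-side buffers of
// kDefaultBufferSize so GrBufferAllocPool instances can reuse staging memory.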
sk_sp<GrBufferAllocPool::CpuBufferCache> GrBufferAllocPool::CpuBufferCache::Make(
        int maxBuffersToCache) {
    return sk_sp<CpuBufferCache>(new CpuBufferCache(maxBuffersToCache));
}

GrBufferAllocPool::CpuBufferCache::CpuBufferCache(int maxBuffersToCache)
        : fMaxBuffersToCache(maxBuffersToCache) {
    if (fMaxBuffersToCache) {
        fBuffers = std::make_unique<Buffer[]>(fMaxBuffersToCache);
    }
}

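// Returns a cached buffer when the request is exactly kDefaultBufferSize and an
// unreferenced cache slot exists; otherwise allocates a fresh GrCpuBuffer. When
// mustBeInitialized is set, the buffer is zeroed the first time it is handed out.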
sk_sp<GrCpuBuffer> GrBufferAllocPool::CpuBufferCache::makeBuffer(size_t size,
                                                                 bool mustBeInitialized) {
    SkASSERT(size > 0);
    Buffer* result = nullptr;
    if (size == kDefaultBufferSize) {
        int i = 0;
        for (; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
            SkASSERT(fBuffers[i].fBuffer->size() == kDefaultBufferSize);
            if (fBuffers[i].fBuffer->unique()) {
                result = &fBuffers[i];
            }
        }
        if (!result && i < fMaxBuffersToCache) {
            fBuffers[i].fBuffer = GrCpuBuffer::Make(size);
            result = &fBuffers[i];
        }
    }
    Buffer tempResult;
    if (!result) {
        tempResult.fBuffer = GrCpuBuffer::Make(size);
        result = &tempResult;
    }
    if (mustBeInitialized && !result->fCleared) {
        result->fCleared = true;
        memset(result->fBuffer->data(), 0, result->fBuffer->size());
    }
    return result->fBuffer;
}

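// Drops every cached buffer and clears the "already zeroed" flags.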
void GrBufferAllocPool::CpuBufferCache::releaseAll() {
    for (int i = 0; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
        fBuffers[i].fBuffer.reset();
        fBuffers[i].fCleared = false;
    }
}

//////////////////////////////////////////////////////////////////////////////

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

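// Unmaps a mapped GPU-backed block and records how much of it went unwritten.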
#define UNMAP_BUFFER(block)                                                          \
    do {                                                                             \
        TRACE_EVENT_INSTANT1("skia.gpu", "GrBufferAllocPool Unmapping Buffer",       \
                             TRACE_EVENT_SCOPE_THREAD, "percent_unwritten",          \
                             (float)((block).fBytesFree) / (block).fBuffer->size()); \
        SkASSERT(!block.fBuffer->isCpuBuffer());                                     \
        static_cast<GrGpuBuffer*>(block.fBuffer.get())->unmap();                     \
    } while (false)

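// The pool starts with no blocks; storage is acquired lazily by createBlock() on
// the first makeSpace()/makeSpaceAtLeast() call.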
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType,
                                     sk_sp<CpuBufferCache> cpuBufferCache)
        : fBlocks(8)
        , fCpuBufferCache(std::move(cpuBufferCache))
        , fGpu(gpu)
        , fBufferType(bufferType) {}

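// Unmaps the most recent block if it is still mapped, then destroys every block.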
void GrBufferAllocPool::deleteBlocks() {
    if (!fBlocks.empty()) {
        GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    SkASSERT(!fBufferPtr);
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    this->resetCpuData(0);
    VALIDATE();
}

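// Ensures pending writes reach the GPU: unmaps a mapped block, or flushes the CPU
// staging copy into the GPU buffer, then forgets the write pointer.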
void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        GrBuffer* buffer = block.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            } else {
                size_t flushSize = block.fBuffer->size() - block.fBytesFree;
                this->flushCpuData(fBlocks.back(), flushSize);
            }
        }
        fBufferPtr = nullptr;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && !static_cast<const GrGpuBuffer*>(buffer)->isMapped()) {
            SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
        }
    } else if (!fBlocks.empty()) {
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.size() - 1; ++i) {
        const GrBuffer* buffer = fBlocks[i].fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.size(); ++i) {
        GrBuffer* buffer = fBlocks[i].fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->size() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.size() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif

static inline size_t align_up_pad(size_t x, size_t alignment) {
    return (alignment - x % alignment) % alignment;
}

static inline size_t align_down(size_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}

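// Sub-allocates 'size' bytes with the requested alignment from the current block,
// creating a new block when the remaining space is insufficient. Returns a CPU
// write pointer and reports the backing buffer and offset to the caller.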
void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   sk_sp<const GrBuffer>* buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = align_up_pad(usedBytes, alignment);
        SkSafeMath safeMath;
        size_t alignedSize = safeMath.add(pad, size);
        if (!safeMath.ok()) {
            return nullptr;
        }
        if (alignedSize <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= alignedSize;
            fBytesInUse += alignedSize;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request with a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, when this was written the GL
    // buffer implementation was cheating on the actual buffer size by shrinking
    // the buffer in updateData() if the amount of data passed was less than
    // the full buffer size. This is old code and both concerns may be obsolete.

    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}

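// Like makeSpace(), but hands the caller all remaining (aligned) space in the
// block, falling back to a new block of 'fallbackSize' when fewer than 'minSize'
// bytes are free. The size actually granted is returned via 'actualSize'.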
void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
                                          size_t fallbackSize,
                                          size_t alignment,
                                          sk_sp<const GrBuffer>* buffer,
                                          size_t* offset,
                                          size_t* actualSize) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);
    SkASSERT(actualSize);

    size_t usedBytes = (fBlocks.empty()) ? 0 : fBlocks.back().fBuffer->size() -
                                               fBlocks.back().fBytesFree;
    size_t pad = align_up_pad(usedBytes, alignment);
    if (!fBufferPtr || fBlocks.empty() || (minSize + pad) > fBlocks.back().fBytesFree) {
        // We either don't have a block yet or the current block doesn't have enough free space.
        // Create a new one.
        if (!this->createBlock(fallbackSize)) {
            return nullptr;
        }
        usedBytes = 0;
        pad = 0;
    }
    SkASSERT(fBufferPtr);

    // Consume padding first, to make subsequent alignment math easier
    memset(static_cast<char*>(fBufferPtr) + usedBytes, 0, pad);
    usedBytes += pad;
    fBlocks.back().fBytesFree -= pad;
    fBytesInUse += pad;

    // Give caller all remaining space in this block (but aligned correctly)
    size_t size = align_down(fBlocks.back().fBytesFree, alignment);
    *offset = usedBytes;
    *buffer = fBlocks.back().fBuffer;
    *actualSize = size;
    fBlocks.back().fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return static_cast<char*>(fBufferPtr) + usedBytes;
}

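// Returns unused bytes from the end of the most recent allocation to the pool.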
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();
    if (!bytes) {
        return;
    }
    SkASSERT(!fBlocks.empty());
    BufferBlock& block = fBlocks.back();
    // Caller shouldn't try to put back more than they've taken and all those bytes should fit into
    // one block. All the uses of this call are sequential with a single makeSpaceAtLeast call. So
    // we should not have a case where someone is putting back bytes that are greater than the
    // current block.
    // It is possible the caller returns all their allocated bytes thus the <= and not just <.
    SkASSERT(bytes <= (block.fBuffer->size() - block.fBytesFree));
    block.fBytesFree += bytes;
    fBytesInUse -= bytes;

    // We don't allow blocks without any used bytes. So if we end up in that case after putting
    // back the bytes then destroy the block. This scenario shouldn't occur often, but even if we
    // end up allocating a new block immediately after destroying this one, the GPU and CPU buffers
    // will usually be cached so the new block shouldn't be too expensive to make.
    // TODO: This was true in older versions and uses of this class but is it still needed to
    // have this restriction?
    if (block.fBytesFree == block.fBuffer->size()) {
        GrBuffer* buffer = block.fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
            UNMAP_BUFFER(block);
        }
        this->destroyBlock();
    }

    VALIDATE();
}

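// Appends a new block of at least kDefaultBufferSize bytes and establishes
// fBufferPtr, either by mapping the GPU buffer, using the CPU buffer's storage
// directly, or pointing at the shared CPU staging buffer.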
bool GrBufferAllocPool::createBlock(size_t requestSize) {
    size_t size = std::max(requestSize, kDefaultBufferSize);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->size();
    if (fBufferPtr) {
        SkASSERT(fBlocks.size() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        GrBuffer* buffer = prev.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(prev);
            } else {
                this->flushCpuData(prev, prev.fBuffer->size() - prev.fBytesFree);
            }
        }
        fBufferPtr = nullptr;
    }

    SkASSERT(!fBufferPtr);

    // If the buffer is CPU-backed we "map" it because it is free to do so and saves a copy.
    // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
    // threshold.
    if (block.fBuffer->isCpuBuffer()) {
        fBufferPtr = static_cast<GrCpuBuffer*>(block.fBuffer.get())->data();
        SkASSERT(fBufferPtr);
    } else {
        if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
            size > fGpu->caps()->bufferMapThreshold()) {
            fBufferPtr = static_cast<GrGpuBuffer*>(block.fBuffer.get())->map();
        }
    }
    if (!fBufferPtr) {
        this->resetCpuData(block.fBytesFree);
        fBufferPtr = fCpuStagingBuffer->data();
    }

    VALIDATE(true);

    return true;
}

void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());
    SkASSERT(fBlocks.back().fBuffer->isCpuBuffer() ||
             !static_cast<GrGpuBuffer*>(fBlocks.back().fBuffer.get())->isMapped());
    fBlocks.pop_back();
    fBufferPtr = nullptr;
}

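// Grows (or releases, when newSize is 0) the CPU staging buffer used when the
// current block is not mapped.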
void GrBufferAllocPool::resetCpuData(size_t newSize) {
    SkASSERT(newSize >= kDefaultBufferSize || !newSize);
    if (!newSize) {
        fCpuStagingBuffer.reset();
        return;
    }
    if (fCpuStagingBuffer && newSize <= fCpuStagingBuffer->size()) {
        return;
    }
    bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
    fCpuStagingBuffer = fCpuBufferCache ? fCpuBufferCache->makeBuffer(newSize, mustInitialize)
                                        : GrCpuBuffer::Make(newSize);
}

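// Copies the staged bytes into the GPU buffer, preferring map/memcpy/unmap for
// large flushes and updateData() otherwise.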
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    SkASSERT(block.fBuffer.get());
    SkASSERT(!block.fBuffer.get()->isCpuBuffer());
    GrGpuBuffer* buffer = static_cast<GrGpuBuffer*>(block.fBuffer.get());
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
    SkASSERT(flushSize <= buffer->size());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fGpu->caps()->bufferMapThreshold()) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, /*offset=*/0, flushSize, /*preserve=*/false);
    VALIDATE(true);
}

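// Allocates the backing buffer for a new block: a CPU buffer when the caps prefer
// client-side storage, otherwise a dynamic GPU buffer from the resource provider.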
sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) {
    const GrCaps& caps = *fGpu->caps();
    auto resourceProvider = fGpu->getContext()->priv().resourceProvider();
    if (caps.preferClientSideDynamicBuffers() ||
        (fBufferType == GrGpuBufferType::kDrawIndirect && caps.useClientSideIndirectBuffers())) {
        // Create a CPU buffer.
        bool mustInitialize = caps.mustClearUploadedBufferData();
        return fCpuBufferCache ? fCpuBufferCache->makeBuffer(size, mustInitialize)
                               : GrCpuBuffer::Make(size);
    }
    return resourceProvider->createBuffer(size,
                                          fBufferType,
                                          kDynamic_GrAccessPattern,
                                          GrResourceProvider::ZeroInit::kNo);
}

////////////////////////////////////////////////////////////////////////////////

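// Vertex flavor of the pool: converts vertex counts and strides into byte
// requests and converts the returned byte offset back into a starting vertex.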
GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kVertex, std::move(cpuBufferCache)) {}

void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         sk_sp<const GrBuffer>* buffer,
                                         int* startVertex) {
    SkASSERT(vertexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startVertex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(vertexSize, vertexCount),
                                     vertexSize,
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

void* GrVertexBufferAllocPool::makeSpaceAtLeast(size_t vertexSize, int minVertexCount,
                                                int fallbackVertexCount,
                                                sk_sp<const GrBuffer>* buffer, int* startVertex,
                                                int* actualVertexCount) {
    SkASSERT(minVertexCount >= 0);
    SkASSERT(fallbackVertexCount >= minVertexCount);
    SkASSERT(buffer);
    SkASSERT(startVertex);
    SkASSERT(actualVertexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(vertexSize, minVertexCount),
                                            SkSafeMath::Mul(vertexSize, fallbackVertexCount),
                                            vertexSize,
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);

    SkASSERT(0 == actualSize % vertexSize);
    SkASSERT(actualSize >= vertexSize * minVertexCount);
    *actualVertexCount = static_cast<int>(actualSize / vertexSize);

    return ptr;
}

////////////////////////////////////////////////////////////////////////////////

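// Index flavor of the pool: allocations are in units of 16-bit indices.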
GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kIndex, std::move(cpuBufferCache)) {}

void* GrIndexBufferAllocPool::makeSpace(int indexCount, sk_sp<const GrBuffer>* buffer,
                                        int* startIndex) {
    SkASSERT(indexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startIndex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(indexCount, sizeof(uint16_t)),
                                     sizeof(uint16_t),
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}

void* GrIndexBufferAllocPool::makeSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                               sk_sp<const GrBuffer>* buffer, int* startIndex,
                                               int* actualIndexCount) {
    SkASSERT(minIndexCount >= 0);
    SkASSERT(fallbackIndexCount >= minIndexCount);
    SkASSERT(buffer);
    SkASSERT(startIndex);
    SkASSERT(actualIndexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(minIndexCount, sizeof(uint16_t)),
                                            SkSafeMath::Mul(fallbackIndexCount, sizeof(uint16_t)),
                                            sizeof(uint16_t),
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));

    SkASSERT(0 == actualSize % sizeof(uint16_t));
    SkASSERT(actualSize >= minIndexCount * sizeof(uint16_t));
    *actualIndexCount = static_cast<int>(actualSize / sizeof(uint16_t));
    return ptr;
}