DawnBuffer.cpp
/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/dawn/DawnBuffer.h"

#include "include/private/base/SkAssert.h"
#include "include/private/base/SkTo.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/dawn/DawnSharedContext.h"

namespace skgpu::graphite {

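// Human-readable names indexed by BufferType; used below for the debug-only wgpu buffer label
// and for memory-dump reporting.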
static const char* kBufferTypeNames[kBufferTypeCount] = {
        "Vertex",
        "Index",
        "Xfer CPU to GPU",
        "Xfer GPU to CPU",
        "Uniform",
        "Storage",
        "Indirect",
        "VertexStorage",
        "IndexStorage",
};

sk_sp<DawnBuffer> DawnBuffer::Make(const DawnSharedContext* sharedContext,
                                   size_t size,
                                   BufferType type,
                                   AccessPattern accessPattern,
                                   std::string_view label) {
    if (size <= 0) {
        return nullptr;
    }

    wgpu::BufferUsage usage = wgpu::BufferUsage::None;

    switch (type) {
        case BufferType::kVertex:
            usage = wgpu::BufferUsage::Vertex | wgpu::BufferUsage::CopyDst;
            break;
        case BufferType::kIndex:
            usage = wgpu::BufferUsage::Index | wgpu::BufferUsage::CopyDst;
            break;
        case BufferType::kXferCpuToGpu:
            usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;
            break;
        case BufferType::kXferGpuToCpu:
            usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
            break;
        case BufferType::kUniform:
            usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
            break;
        case BufferType::kStorage:
            usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst |
                    wgpu::BufferUsage::CopySrc;
            break;
        case BufferType::kIndirect:
            usage = wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage |
                    wgpu::BufferUsage::CopyDst;
            break;
        case BufferType::kVertexStorage:
            usage = wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Storage;
            break;
        case BufferType::kIndexStorage:
            usage = wgpu::BufferUsage::Index | wgpu::BufferUsage::Storage;
            break;
    }

    if (sharedContext->caps()->drawBufferCanBeMapped() &&
        accessPattern == AccessPattern::kHostVisible &&
        type != BufferType::kXferGpuToCpu) {
        usage |= wgpu::BufferUsage::MapWrite;
    }

    wgpu::BufferDescriptor desc;
#ifdef SK_DEBUG
    desc.label = kBufferTypeNames[static_cast<int>(type)];
#endif
    desc.usage = usage;
    desc.size = size;
    // Specifying mappedAtCreation avoids clearing the buffer on the GPU, which can cause
    // MapAsync to be very slow as it waits for GPU execution to complete.
    desc.mappedAtCreation = SkToBool(usage & wgpu::BufferUsage::MapWrite);

    auto buffer = sharedContext->device().CreateBuffer(&desc);
    if (!buffer) {
        return {};
    }

    void* mappedAtCreationPtr = nullptr;
    if (desc.mappedAtCreation) {
        mappedAtCreationPtr = buffer.GetMappedRange();
        SkASSERT(mappedAtCreationPtr);
    }

    return sk_sp<DawnBuffer>(new DawnBuffer(sharedContext,
                                            size,
                                            std::move(buffer),
                                            type,
                                            mappedAtCreationPtr,
                                            std::move(label)));
}

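// A buffer that is mappable for writing asks the base Resource to convert command buffer refs
// to usage refs (the commandBufferRefsAsUsageRefs argument below); prepareForReturnToCache()
// relies on this so that an async re-map can be started once no usage refs remain.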
DawnBuffer::DawnBuffer(const DawnSharedContext* sharedContext,
                       size_t size,
                       wgpu::Buffer buffer,
                       BufferType type,
                       void* mappedAtCreationPtr,
                       std::string_view label)
        : Buffer(sharedContext,
                 size,
                 std::move(label),
                 /*commandBufferRefsAsUsageRefs=*/buffer.GetUsage() & wgpu::BufferUsage::MapWrite)
        , fBuffer(std::move(buffer))
        , fType(type) {
    fMapPtr = mappedAtCreationPtr;
}

#if defined(__EMSCRIPTEN__)
void DawnBuffer::prepareForReturnToCache(const std::function<void()>& takeRef) {
    // This function is only useful for Emscripten where we have to pre-map the buffer
    // once it is returned to the cache.
    SkASSERT(this->sharedContext()->caps()->bufferMapsAreAsync());

    // This implementation is almost Dawn-agnostic. However, the Buffer base class doesn't have
    // any way of distinguishing a buffer that is mappable for writing from one mappable for
    // reading. We only need to re-map the former.
    if (!(fBuffer.GetUsage() & wgpu::BufferUsage::MapWrite)) {
        return;
    }
    // We cannot start an async map while the GPU is still using the buffer. We asked that
    // our Resource convert command buffer refs to usage refs. So we should never have any
    // command buffer refs.
    SkASSERT(!this->debugHasCommandBufferRef());
    // Note that the map state cannot change on another thread while we are here. We got here
    // because there were no UsageRefs on the buffer, and async mapping holds a UsageRef until
    // it completes.
    if (this->isMapped()) {
        return;
    }
    takeRef();
    this->asyncMap([](void* ctx, skgpu::CallbackResult result) {
                       sk_sp<DawnBuffer> buffer(static_cast<DawnBuffer*>(ctx));
                       if (result != skgpu::CallbackResult::kSuccess) {
                           buffer->setDeleteASAP();
                       }
                   },
                   this);
}

void DawnBuffer::onAsyncMap(GpuFinishedProc proc, GpuFinishedContext ctx) {
    // This function is only useful for Emscripten where we have to use asyncMap().
    SkASSERT(this->sharedContext()->caps()->bufferMapsAreAsync());

    if (proc) {
        SkAutoMutexExclusive ex(fAsyncMutex);
        if (this->isMapped()) {
            proc(ctx, CallbackResult::kSuccess);
            return;
        }
        fAsyncMapCallbacks.push_back(RefCntedCallback::Make(proc, ctx));
    }
    if (this->isUnmappable()) {
        return;
    }
    SkASSERT(fBuffer);
    SkASSERT((fBuffer.GetUsage() & wgpu::BufferUsage::MapRead) ||
             (fBuffer.GetUsage() & wgpu::BufferUsage::MapWrite));
    SkASSERT(fBuffer.GetMapState() == wgpu::BufferMapState::Unmapped);
    bool isWrite = fBuffer.GetUsage() & wgpu::BufferUsage::MapWrite;
    auto buffer = sk_ref_sp(this);

    fBuffer.MapAsync(
            isWrite ? wgpu::MapMode::Write : wgpu::MapMode::Read,
            0,
            fBuffer.GetSize(),
            [](WGPUBufferMapAsyncStatus s, void* userData) {
                sk_sp<DawnBuffer> buffer(static_cast<DawnBuffer*>(userData));
                buffer->mapCallback(s);
            },
            buffer.release());
}

#endif // defined(__EMSCRIPTEN__)

void DawnBuffer::onMap() {
#if defined(__EMSCRIPTEN__)
    SKGPU_LOG_W("Synchronous buffer mapping not supported in Dawn. Failing map request.");
#else
    SkASSERT(!this->sharedContext()->caps()->bufferMapsAreAsync());
    SkASSERT(fBuffer);
    SkASSERT((fBuffer.GetUsage() & wgpu::BufferUsage::MapRead) ||
             (fBuffer.GetUsage() & wgpu::BufferUsage::MapWrite));
    bool isWrite = fBuffer.GetUsage() & wgpu::BufferUsage::MapWrite;

    // Use wgpu::Future and WaitAny with timeout=0 to trigger the callback immediately.
    // This should work because our resource tracking mechanism should make sure that
    // the buffer is free of any GPU use at this point.
    wgpu::BufferMapCallbackInfo callbackInfo{};
    callbackInfo.mode = wgpu::CallbackMode::WaitAnyOnly;
    callbackInfo.userdata = this;
    callbackInfo.callback = [](WGPUBufferMapAsyncStatus s, void* userData) {
        auto buffer = static_cast<DawnBuffer*>(userData);
        buffer->mapCallback(s);
    };

    wgpu::FutureWaitInfo mapWaitInfo{};

    mapWaitInfo.future = fBuffer.MapAsync(isWrite ? wgpu::MapMode::Write : wgpu::MapMode::Read,
                                          0,
                                          fBuffer.GetSize(),
                                          callbackInfo);

    wgpu::Device device = static_cast<const DawnSharedContext*>(sharedContext())->device();
    wgpu::Instance instance = device.GetAdapter().GetInstance();
    [[maybe_unused]] auto status = instance.WaitAny(1, &mapWaitInfo, /*timeoutNS=*/0);

    if (status != wgpu::WaitStatus::Success) {
        // WaitAny(timeout=0) might fail in this scenario:
        // - Allocates a buffer.
        // - Encodes a command buffer to copy a texture to this buffer.
        // - Submits the command buffer. If OOM happens, this command buffer will fail to
        //   be submitted.
        // - The buffer is *supposed* to be free of any GPU use since the command buffer that
        //   would have used it wasn't submitted successfully.
        // - If we try to map this buffer at this point, internally Dawn will try to use the GPU
        //   to clear this buffer to zeros, since this is its first use. WaitAny(timeout=0) won't
        //   work since the buffer now has a pending GPU clear operation.
        //
        // To work around this, we need to try again with a blocking WaitAny(), to wait for the
        // clear operation to finish.
        // Notes:
        // - This fallback should be rare since it is caused by an OOM error during buffer
        //   readbacks.
        // - For buffer writing cases, since we use mappedAtCreation, the GPU clear won't happen.
        status = instance.WaitAny(
                1, &mapWaitInfo, /*timeoutNS=*/std::numeric_limits<uint64_t>::max());
    }

    SkASSERT(status == wgpu::WaitStatus::Success);
    SkASSERT(mapWaitInfo.completed);
#endif // defined(__EMSCRIPTEN__)
}

void DawnBuffer::onUnmap() {
    SkASSERT(fBuffer);
    SkASSERT(this->isUnmappable());

    fMapPtr = nullptr;
    fBuffer.Unmap();
}

void DawnBuffer::mapCallback(WGPUBufferMapAsyncStatus status) {
    SkAutoMutexExclusive em(this->fAsyncMutex);
    if (status == WGPUBufferMapAsyncStatus_Success) {
        if (this->fBuffer.GetUsage() & wgpu::BufferUsage::MapWrite) {
            this->fMapPtr = this->fBuffer.GetMappedRange();
        } else {
            // If the buffer is only created with MapRead usage, Dawn only allows returning a
            // constant pointer. We need to use const_cast as a workaround here.
            this->fMapPtr = const_cast<void*>(this->fBuffer.GetConstMappedRange());
        }
    } else {
        const char* statusStr;
        Priority priority = Priority::kError;
        switch (status) {
            case WGPUBufferMapAsyncStatus_ValidationError:
                statusStr = "ValidationError";
                break;
            case WGPUBufferMapAsyncStatus_Unknown:
                statusStr = "Unknown";
                break;
            case WGPUBufferMapAsyncStatus_DeviceLost:
                statusStr = "DeviceLost";
                break;
            case WGPUBufferMapAsyncStatus_DestroyedBeforeCallback:
                statusStr = "DestroyedBeforeCallback";
                priority = Priority::kDebug;
                break;
            case WGPUBufferMapAsyncStatus_UnmappedBeforeCallback:
                statusStr = "UnmappedBeforeCallback";
                priority = Priority::kDebug;
                break;
            case WGPUBufferMapAsyncStatus_MappingAlreadyPending:
                statusStr = "MappingAlreadyPending";
                break;
            case WGPUBufferMapAsyncStatus_OffsetOutOfRange:
                statusStr = "OffsetOutOfRange";
                break;
            case WGPUBufferMapAsyncStatus_SizeOutOfRange:
                statusStr = "SizeOutOfRange";
                break;
            default:
                statusStr = "<Other>";
                break;
        }
        SKGPU_LOG(priority, "Buffer async map failed with status %s.", statusStr);
        for (auto& cb : this->fAsyncMapCallbacks) {
            cb->setFailureResult();
        }
    }
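    // Clearing the list below drops the last ref to each RefCntedCallback; a callback is invoked
    // when it is destroyed, reporting success unless setFailureResult() was called above.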
    this->fAsyncMapCallbacks.clear();
}

bool DawnBuffer::isUnmappable() const {
    return fBuffer.GetMapState() != wgpu::BufferMapState::Unmapped;
}

void DawnBuffer::freeGpuData() {
    fBuffer = nullptr;
}

void DawnBuffer::onDumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump,
                                        const char* dumpName) const {
    Buffer::onDumpMemoryStatistics(traceMemoryDump, dumpName);
    traceMemoryDump->dumpStringValue(
            dumpName, "backend_label", kBufferTypeNames[static_cast<int>(fType)]);
}

} // namespace skgpu::graphite
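For orientation, here is a minimal usage sketch of the Make() and mapping entry points above. It is illustrative only and not part of DawnBuffer.cpp: it assumes a valid DawnSharedContext* and a non-Emscripten build where Buffer::map() is synchronous, and the helper name MakeExampleUploadBuffer is hypothetical. Real callers create and map buffers through Graphite's resource provider rather than calling DawnBuffer directly.

static sk_sp<Buffer> MakeExampleUploadBuffer(const DawnSharedContext* sharedContext) {
    // A CPU-to-GPU transfer buffer gets CopySrc | MapWrite usage (see the switch in Make()),
    // so it is created mappedAtCreation.
    sk_sp<DawnBuffer> buffer = DawnBuffer::Make(sharedContext,
                                                /*size=*/256,
                                                BufferType::kXferCpuToGpu,
                                                AccessPattern::kHostVisible,
                                                /*label=*/"ExampleUploadBuffer");
    if (!buffer) {
        return nullptr;
    }
    // Because the buffer was created mappedAtCreation, map() should return the pre-mapped
    // pointer without waiting on the GPU.
    if (void* ptr = buffer->map()) {
        // ... write up to 256 bytes of upload data through ptr ...
        buffer->unmap();
    }
    return buffer;
}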