Flutter Engine
The Flutter Engine
DawnBuffer.cpp
Go to the documentation of this file.
/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
// NOTE(review): the #include block (original lines 8-15) was lost in extraction;
// restore from upstream DawnBuffer.cpp (DawnBuffer.h, DawnSharedContext.h, Log.h, ...)
// before compiling.
namespace skgpu::graphite {
17
19 size_t size,
21 AccessPattern accessPattern) {
22 if (size <= 0) {
23 return nullptr;
24 }
25
26 wgpu::BufferUsage usage = wgpu::BufferUsage::None;
27
28 switch (type) {
30 usage = wgpu::BufferUsage::Vertex | wgpu::BufferUsage::CopyDst;
31 break;
33 usage = wgpu::BufferUsage::Index | wgpu::BufferUsage::CopyDst;
34 break;
36 usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;
37 break;
39 usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
40 break;
42 usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
43 break;
45 usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst |
46 wgpu::BufferUsage::CopySrc;
47 break;
49 usage = wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage |
50 wgpu::BufferUsage::CopyDst;
51 break;
53 usage = wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Storage;
54 break;
56 usage = wgpu::BufferUsage::Index | wgpu::BufferUsage::Storage;
57 break;
58 }
59
61 accessPattern == AccessPattern::kHostVisible &&
63 // If the buffer is intended to be mappabe, add MapWrite usage and remove
64 // CopyDst.
65 // We don't want to allow both CPU and GPU to write to the same buffer.
66 usage |= wgpu::BufferUsage::MapWrite;
67 usage &= ~wgpu::BufferUsage::CopyDst;
68 }
69
70 wgpu::BufferDescriptor desc;
71 desc.usage = usage;
72 desc.size = size;
73 // Specifying mappedAtCreation avoids clearing the buffer on the GPU which can cause MapAsync to
74 // be very slow as it waits for GPU execution to complete.
75 desc.mappedAtCreation = SkToBool(usage & wgpu::BufferUsage::MapWrite);
76
77 auto buffer = sharedContext->device().CreateBuffer(&desc);
78 if (!buffer) {
79 return {};
80 }
81
82 void* mappedAtCreationPtr = nullptr;
83 if (desc.mappedAtCreation) {
84 mappedAtCreationPtr = buffer.GetMappedRange();
85 SkASSERT(mappedAtCreationPtr);
86 }
87
88 return sk_sp<DawnBuffer>(
89 new DawnBuffer(sharedContext, size, std::move(buffer), mappedAtCreationPtr));
90}
91
92DawnBuffer::DawnBuffer(const DawnSharedContext* sharedContext,
93 size_t size,
94 wgpu::Buffer buffer,
95 void* mappedAtCreationPtr)
96 : Buffer(sharedContext,
97 size,
98 /*commandBufferRefsAsUsageRefs=*/buffer.GetUsage() & wgpu::BufferUsage::MapWrite)
99 , fBuffer(std::move(buffer)) {
100 fMapPtr = mappedAtCreationPtr;
101}
102
103#if defined(__EMSCRIPTEN__)
104void DawnBuffer::prepareForReturnToCache(const std::function<void()>& takeRef) {
105 // This function is only useful for Emscripten where we have to pre-map the buffer
106 // once it is returned to the cache.
107 SkASSERT(this->sharedContext()->caps()->bufferMapsAreAsync());
108
109 // This implementation is almost Dawn-agnostic. However, Buffer base class doesn't have any
110 // way of distinguishing a buffer that is mappable for writing from one mappable for reading.
111 // We only need to re-map the former.
112 if (!(fBuffer.GetUsage() & wgpu::BufferUsage::MapWrite)) {
113 return;
114 }
115 // We cannot start an async map while the GPU is still using the buffer. We asked that
116 // our Resource convert command buffer refs to usage refs. So we should never have any
117 // command buffer refs.
118 SkASSERT(!this->debugHasCommandBufferRef());
119 // Note that the map state cannot change on another thread when we are here. We got here
120 // because there were no UsageRefs on the buffer but async mapping holds a UsageRef until it
121 // completes.
122 if (this->isMapped()) {
123 return;
124 }
125 takeRef();
126 this->asyncMap([](void* ctx, skgpu::CallbackResult result) {
127 sk_sp<DawnBuffer> buffer(static_cast<DawnBuffer*>(ctx));
129 buffer->setDeleteASAP();
130 }
131 },
132 this);
133}
134
136 // This function is only useful for Emscripten where we have to use asyncMap().
137 SkASSERT(this->sharedContext()->caps()->bufferMapsAreAsync());
138
139 if (proc) {
140 SkAutoMutexExclusive ex(fAsyncMutex);
141 if (this->isMapped()) {
142 proc(ctx, CallbackResult::kSuccess);
143 return;
144 }
145 fAsyncMapCallbacks.push_back(RefCntedCallback::Make(proc, ctx));
146 }
147 if (this->isUnmappable()) {
148 return;
149 }
150 SkASSERT(fBuffer);
151 SkASSERT((fBuffer.GetUsage() & wgpu::BufferUsage::MapRead) ||
152 (fBuffer.GetUsage() & wgpu::BufferUsage::MapWrite));
153 SkASSERT(fBuffer.GetMapState() == wgpu::BufferMapState::Unmapped);
154 bool isWrite = fBuffer.GetUsage() & wgpu::BufferUsage::MapWrite;
155 auto buffer = sk_ref_sp(this);
156
157 fBuffer.MapAsync(
158 isWrite ? wgpu::MapMode::Write : wgpu::MapMode::Read,
159 0,
160 fBuffer.GetSize(),
161 [](WGPUBufferMapAsyncStatus s, void* userData) {
162 sk_sp<DawnBuffer> buffer(static_cast<DawnBuffer*>(userData));
163 buffer->mapCallback(s);
164 },
165 buffer.release());
166}
167
168#endif // defined(__EMSCRIPTEN__)
169
170void DawnBuffer::onMap() {
171#if defined(__EMSCRIPTEN__)
172 SKGPU_LOG_W("Synchronous buffer mapping not supported in Dawn. Failing map request.");
173#else
174 SkASSERT(!this->sharedContext()->caps()->bufferMapsAreAsync());
175 SkASSERT(fBuffer);
176 SkASSERT((fBuffer.GetUsage() & wgpu::BufferUsage::MapRead) ||
177 (fBuffer.GetUsage() & wgpu::BufferUsage::MapWrite));
178 bool isWrite = fBuffer.GetUsage() & wgpu::BufferUsage::MapWrite;
179
180 // Use wgpu::Future and WaitAny with timeout=0 to trigger callback immediately.
181 // This should work because our resource tracking mechanism should make sure that
182 // the buffer is free of any GPU use at this point.
183 wgpu::FutureWaitInfo mapWaitInfo{};
184
185 mapWaitInfo.future =
186 fBuffer.MapAsync(isWrite ? wgpu::MapMode::Write : wgpu::MapMode::Read,
187 0,
188 fBuffer.GetSize(),
189 wgpu::CallbackMode::WaitAnyOnly,
190 [this](wgpu::MapAsyncStatus s, const char*) {
191 this->mapCallback(static_cast<WGPUBufferMapAsyncStatus>(s));
192 });
193
194 wgpu::Device device = static_cast<const DawnSharedContext*>(sharedContext())->device();
195 wgpu::Instance instance = device.GetAdapter().GetInstance();
196 [[maybe_unused]] auto status = instance.WaitAny(1, &mapWaitInfo, /*timeoutNS=*/0);
197
198 if (status != wgpu::WaitStatus::Success) {
199 // WaitAny(timeout=0) might fail in this scenario:
200 // - Allocates a buffer.
201 // - Encodes a command buffer to copy a texture to this buffer.
202 // - Submits the command buffer. If OOM happens, this command buffer will fail to
203 // be submitted.
204 // - The buffer is *supposed* to be free of any GPU use since the command buffer that would
205 // have used it wasn't submitted successfully.
206 // - If we try to map this buffer at this point, internally Dawn will try to use GPU to
207 // clear this buffer to zeros, since this is its 1st use. WaitAny(timeout=0) won't work
208 // since the buffer now has a pending GPU clear operation.
209 //
210 // To work around this, we need to try again with a blocking WaitAny(), to wait for the
211 // clear operation to finish.
212 // Notes:
213 // - This fallback should be rare since it is caused by an OOM error during buffer
214 // readbacks.
215 // - For buffer writing cases, since we use mappedAtCreation, the GPU clear won't happen.
216 status = instance.WaitAny(
217 1, &mapWaitInfo, /*timeoutNS=*/std::numeric_limits<uint64_t>::max());
218 }
219
220 SkASSERT(status == wgpu::WaitStatus::Success);
221 SkASSERT(mapWaitInfo.completed);
222#endif // defined(__EMSCRIPTEN__)
223}
224
225void DawnBuffer::onUnmap() {
226 SkASSERT(fBuffer);
227 SkASSERT(this->isUnmappable());
228
229 fMapPtr = nullptr;
230 fBuffer.Unmap();
231}
232
233void DawnBuffer::mapCallback(WGPUBufferMapAsyncStatus status) {
234 SkAutoMutexExclusive em(this->fAsyncMutex);
235 if (status == WGPUBufferMapAsyncStatus_Success) {
236 if (this->fBuffer.GetUsage() & wgpu::BufferUsage::MapWrite) {
237 this->fMapPtr = this->fBuffer.GetMappedRange();
238 } else {
239 // If buffer is only created with MapRead usage, Dawn only allows returning
240 // constant pointer. We need to use const_cast as a workaround here.
241 this->fMapPtr = const_cast<void*>(this->fBuffer.GetConstMappedRange());
242 }
243 } else {
244 const char* statusStr;
245 Priority priority = Priority::kError;
246 switch (status) {
247 case WGPUBufferMapAsyncStatus_ValidationError:
248 statusStr = "ValidationError";
249 break;
250 case WGPUBufferMapAsyncStatus_Unknown:
251 statusStr = "Unknown";
252 break;
253 case WGPUBufferMapAsyncStatus_DeviceLost:
254 statusStr = "DeviceLost";
255 break;
256 case WGPUBufferMapAsyncStatus_DestroyedBeforeCallback:
257 statusStr = "DestroyedBeforeCallback";
258 priority = Priority::kDebug;
259 break;
260 case WGPUBufferMapAsyncStatus_UnmappedBeforeCallback:
261 statusStr = "UnmappedBeforeCallback";
262 priority = Priority::kDebug;
263 break;
264 case WGPUBufferMapAsyncStatus_MappingAlreadyPending:
265 statusStr = "MappingAlreadyPending";
266 break;
267 case WGPUBufferMapAsyncStatus_OffsetOutOfRange:
268 statusStr = "OffsetOutOfRange";
269 break;
270 case WGPUBufferMapAsyncStatus_SizeOutOfRange:
271 statusStr = "SizeOutOfRange";
272 break;
273 default:
274 statusStr = "<Other>";
275 break;
276 }
277 SKGPU_LOG(priority, "Buffer async map failed with status %s.", statusStr);
278 for (auto& cb : this->fAsyncMapCallbacks) {
279 cb->setFailureResult();
280 }
281 }
282 this->fAsyncMapCallbacks.clear();
283}
284
286 return fBuffer.GetMapState() != wgpu::BufferMapState::Unmapped;
287}
288
289void DawnBuffer::freeGpuData() {
290 if (fBuffer) {
291 // Explicitly destroy the buffer since it might be ref'd by cached bind groups which are
292 // not immediately cleaned up. Graphite should already guarantee that all command buffers
293 // using this buffer (indirectly via BindGroups) are already completed.
294 fBuffer.Destroy();
295 fBuffer = nullptr;
296 }
297}
298
299void DawnBuffer::setBackendLabel(char const* label) {
300 SkASSERT(label);
301 if (sharedContext()->caps()->setBackendLabels()) {
302 fBuffer.SetLabel(label);
303 }
304}
305
} // namespace skgpu::graphite
GrTriangulator::Vertex Vertex
#define SKGPU_LOG(priority, fmt,...)
Definition: Log.h:27
#define SKGPU_LOG_W(fmt,...)
Definition: Log.h:40
#define SkASSERT(cond)
Definition: SkAssert.h:116
SkMeshSpecification::Uniform Uniform
Definition: SkMesh.cpp:66
sk_sp< T > sk_ref_sp(T *obj)
Definition: SkRefCnt.h:381
static constexpr bool SkToBool(const T &x)
Definition: SkTo.h:35
GLenum type
static sk_sp< RefCntedCallback > Make(Callback proc, Context ctx)
virtual void onAsyncMap(GpuFinishedProc, GpuFinishedContext)
Definition: Buffer.cpp:35
size_t size() const
Definition: Buffer.h:19
bool isMapped() const
Definition: Buffer.h:30
void asyncMap(GpuFinishedProc=nullptr, GpuFinishedContext=nullptr)
Definition: Buffer.cpp:22
bool drawBufferCanBeMapped() const
Definition: Caps.h:242
bool isUnmappable() const override
Definition: DawnBuffer.cpp:285
static sk_sp< DawnBuffer > Make(const DawnSharedContext *, size_t size, BufferType type, AccessPattern)
Definition: DawnBuffer.cpp:18
virtual void prepareForReturnToCache(const std::function< void()> &takeRef)
Definition: Resource.h:173
const SharedContext * sharedContext() const
Definition: Resource.h:189
const Caps * caps() const
Definition: SharedContext.h:39
VkDevice device
Definition: main.cc:53
VkInstance instance
Definition: main.cc:48
struct MyStruct s
GAsyncResult * result
Dart_NativeFunction function
Definition: fuchsia.cc:51
static float max(float r, float g, float b)
Definition: hsl.cpp:49
SK_API bool Read(SkStreamSeekable *src, SkDocumentPage *dstArray, int dstArrayCount, const SkDeserialProcs *=nullptr)
std::unique_ptr< ProgramUsage > GetUsage(const Program &program)
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not defaults to or::depending on whether ipv6 is specified vm service A custom Dart VM Service port The default is to pick a randomly available open port disable vm Disable the Dart VM Service The Dart VM Service is never available in release mode disable vm service Disable mDNS Dart VM Service publication Bind to the IPv6 localhost address for the Dart VM Service Ignored if vm service host is set endless trace buffer
Definition: switches.h:126
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
Definition: switches.h:259
void * GpuFinishedContext
Definition: GraphiteTypes.h:29
void(*)(GpuFinishedContext finishedContext, CallbackResult) GpuFinishedProc
Definition: GraphiteTypes.h:30
VulkanMemoryAllocator::BufferUsage BufferUsage
CallbackResult
Definition: GpuTypes.h:45
Definition: ref_ptr.h:256
static void usage(char *argv0)