DawnResourceProvider.cpp
/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

// NOTE: the include list shown here is reconstructed from the identifiers used in
// this file; the original listing collapsed it, so the exact set may differ.
#include "src/gpu/graphite/dawn/DawnResourceProvider.h"

#include "include/gpu/graphite/BackendTexture.h"
#include "src/gpu/graphite/dawn/DawnBuffer.h"
#include "src/gpu/graphite/dawn/DawnCaps.h"
#include "src/gpu/graphite/dawn/DawnComputePipeline.h"
#include "src/gpu/graphite/dawn/DawnErrorChecker.h"
#include "src/gpu/graphite/dawn/DawnGraphicsPipeline.h"
#include "src/gpu/graphite/dawn/DawnSampler.h"
#include "src/gpu/graphite/dawn/DawnSharedContext.h"
#include "src/gpu/graphite/dawn/DawnTexture.h"
namespace skgpu::graphite {

namespace {

constexpr int kBufferBindingSizeAlignment = 16;
constexpr int kMaxNumberOfCachedBufferBindGroups = 32;
constexpr int kMaxNumberOfCachedTextureBindGroups = 4096;

wgpu::ShaderModule create_shader_module(const wgpu::Device& device, const char* source) {
    wgpu::ShaderModuleWGSLDescriptor wgslDesc;
    wgslDesc.code = source;
    wgpu::ShaderModuleDescriptor descriptor;
    descriptor.nextInChain = &wgslDesc;
    return device.CreateShaderModule(&descriptor);
}

wgpu::RenderPipeline create_blit_render_pipeline(const DawnSharedContext* sharedContext,
                                                 const char* label,
                                                 wgpu::ShaderModule vsModule,
                                                 wgpu::ShaderModule fsModule,
                                                 wgpu::TextureFormat renderPassColorFormat,
                                                 wgpu::TextureFormat renderPassDepthStencilFormat,
                                                 int numSamples) {
    wgpu::RenderPipelineDescriptor descriptor;
    descriptor.label = label;
    descriptor.layout = nullptr;

    wgpu::ColorTargetState colorTarget;
    colorTarget.format = renderPassColorFormat;
    colorTarget.blend = nullptr;
    colorTarget.writeMask = wgpu::ColorWriteMask::All;

    wgpu::DepthStencilState depthStencil;
    if (renderPassDepthStencilFormat != wgpu::TextureFormat::Undefined) {
        depthStencil.format = renderPassDepthStencilFormat;
        depthStencil.depthWriteEnabled = false;
        depthStencil.depthCompare = wgpu::CompareFunction::Always;

        descriptor.depthStencil = &depthStencil;
    }

    wgpu::FragmentState fragment;
    fragment.module = std::move(fsModule);
    fragment.entryPoint = "main";
    fragment.targetCount = 1;
    fragment.targets = &colorTarget;
    descriptor.fragment = &fragment;

    descriptor.vertex.module = std::move(vsModule);
    descriptor.vertex.entryPoint = "main";
    descriptor.vertex.constantCount = 0;
    descriptor.vertex.constants = nullptr;
    descriptor.vertex.bufferCount = 0;
    descriptor.vertex.buffers = nullptr;

    descriptor.primitive.frontFace = wgpu::FrontFace::CCW;
    descriptor.primitive.cullMode = wgpu::CullMode::None;
    descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
    descriptor.primitive.stripIndexFormat = wgpu::IndexFormat::Undefined;

    descriptor.multisample.count = numSamples;
    descriptor.multisample.mask = 0xFFFFFFFF;
    descriptor.multisample.alphaToCoverageEnabled = false;

    std::optional<DawnErrorChecker> errorChecker;
    if (sharedContext->dawnCaps()->allowScopedErrorChecks()) {
        errorChecker.emplace(sharedContext);
    }
    auto pipeline = sharedContext->device().CreateRenderPipeline(&descriptor);
    if (errorChecker.has_value() && errorChecker->popErrorScopes() != DawnErrorType::kNoError) {
        return nullptr;
    }

    return pipeline;
}

UniqueKey make_ubo_bind_group_key(
        const std::array<std::pair<const DawnBuffer*, uint32_t>, 4>& boundBuffersAndSizes) {
    static const UniqueKey::Domain kBufferBindGroupDomain = UniqueKey::GenerateDomain();

    UniqueKey uniqueKey;
    {
        // Each entry in the bind group needs 2 uint32_t in the key:
        //  - the buffer's unique ID: 32 bits.
        //  - the buffer's binding size: 32 bits.
        // We need a total of 4 entries in the uniform buffer bind group.
        // Unused entries are assigned zero values.
        UniqueKey::Builder builder(
                &uniqueKey, kBufferBindGroupDomain, 8, "GraphicsPipelineBufferBindGroup");

        for (uint32_t i = 0; i < boundBuffersAndSizes.size(); ++i) {
            const DawnBuffer* boundBuffer = boundBuffersAndSizes[i].first;
            const uint32_t bindingSize = boundBuffersAndSizes[i].second;
            if (boundBuffer) {
                builder[2 * i] = boundBuffer->uniqueID().asUInt();
                builder[2 * i + 1] = bindingSize;
            } else {
                builder[2 * i] = 0;
                builder[2 * i + 1] = 0;
            }
        }

        builder.finish();
    }

    return uniqueKey;
}
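
// For illustration only (not part of the original source): a key produced by
// make_ubo_bind_group_key() for a hypothetical binding set -- a buffer with uniqueID 7
// bound at slot 0 with a 16-byte binding, a buffer with uniqueID 9 bound at slot 2 with
// a 64-byte binding, and slots 1 and 3 unused -- would carry the eight uint32 values
// {7, 16, 0, 0, 9, 64, 0, 0}: one (ID, size) pair per slot, zeros for the empty slots.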

UniqueKey make_texture_bind_group_key(const DawnSampler* sampler, const DawnTexture* texture) {
    static const UniqueKey::Domain kTextureBindGroupDomain = UniqueKey::GenerateDomain();

    UniqueKey uniqueKey;
    {
        UniqueKey::Builder builder(&uniqueKey,
                                   kTextureBindGroupDomain,
                                   2,
                                   "GraphicsPipelineSingleTextureSamplerBindGroup");

        builder[0] = sampler->uniqueID().asUInt();
        builder[1] = texture->uniqueID().asUInt();

        builder.finish();
    }

    return uniqueKey;
}

}  // namespace

DawnResourceProvider::DawnResourceProvider(SharedContext* sharedContext,
                                           SingleOwner* singleOwner,
                                           uint32_t recorderID,
                                           size_t resourceBudget)
        : ResourceProvider(sharedContext, singleOwner, recorderID, resourceBudget)
        , fUniformBufferBindGroupCache(kMaxNumberOfCachedBufferBindGroups)
        , fSingleTextureSamplerBindGroups(kMaxNumberOfCachedTextureBindGroups) {}

DawnResourceProvider::~DawnResourceProvider() = default;

wgpu::RenderPipeline DawnResourceProvider::findOrCreateBlitWithDrawPipeline(
        const RenderPassDesc& renderPassDesc) {
    uint64_t renderPassKey =
            this->dawnSharedContext()->dawnCaps()->getRenderPassDescKeyForPipeline(renderPassDesc);
    wgpu::RenderPipeline pipeline = fBlitWithDrawPipelines[renderPassKey];
    if (!pipeline) {
        static constexpr char kVertexShaderText[] = R"(
            var<private> fullscreenTriPositions : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
                vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));

            @vertex
            fn main(@builtin(vertex_index) vertexIndex : u32) -> @builtin(position) vec4<f32> {
                return vec4(fullscreenTriPositions[vertexIndex], 1.0, 1.0);
            }
        )";

        static constexpr char kFragmentShaderText[] = R"(
            @group(0) @binding(0) var colorMap: texture_2d<f32>;

            @fragment
            fn main(@builtin(position) fragPosition : vec4<f32>) -> @location(0) vec4<f32> {
                var coords : vec2<i32> = vec2<i32>(i32(fragPosition.x), i32(fragPosition.y));
                return textureLoad(colorMap, coords, 0);
            }
        )";

        auto vsModule = create_shader_module(dawnSharedContext()->device(), kVertexShaderText);
        auto fsModule = create_shader_module(dawnSharedContext()->device(), kFragmentShaderText);

        pipeline = create_blit_render_pipeline(
                dawnSharedContext(),
                /*label=*/"BlitWithDraw",
                std::move(vsModule),
                std::move(fsModule),
                /*renderPassColorFormat=*/
                renderPassDesc.fColorAttachment.fTextureInfo.dawnTextureSpec().getViewFormat(),
                /*renderPassDepthStencilFormat=*/
                renderPassDesc.fDepthStencilAttachment.fTextureInfo.isValid()
                        ? renderPassDesc.fDepthStencilAttachment.fTextureInfo.dawnTextureSpec()
                                  .getViewFormat()
                        : wgpu::TextureFormat::Undefined,
                /*numSamples=*/renderPassDesc.fColorAttachment.fTextureInfo.numSamples());

        if (pipeline) {
            fBlitWithDrawPipelines.set(renderPassKey, pipeline);
        }
    }

    return pipeline;
}
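
// Illustrative usage sketch (not from the original source; the variable names below are
// hypothetical). A command buffer that wants to copy the contents of a source texture into
// the current color attachment with this pipeline binds the source at @group(0) @binding(0)
// and draws the oversized triangle declared in the vertex shader above:
//
//     wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&wgpuRenderPassDesc);
//     pass.SetPipeline(blitPipeline);
//     pass.SetBindGroup(0, srcTextureBindGroup);
//     pass.Draw(3);   // three vertices cover the whole viewport
//     pass.End();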

sk_sp<Texture> DawnResourceProvider::onCreateWrappedTexture(const BackendTexture& texture) {
    // Convert to smart pointers. The wgpu::Texture and wgpu::TextureView constructors
    // increment the ref count of the underlying handle.
    wgpu::Texture dawnTexture = texture.getDawnTexturePtr();
    wgpu::TextureView dawnTextureView = texture.getDawnTextureViewPtr();
    SkASSERT(!dawnTexture || !dawnTextureView);

    if (!dawnTexture && !dawnTextureView) {
        return {};
    }

    if (dawnTexture) {
        return DawnTexture::MakeWrapped(this->dawnSharedContext(),
                                        texture.dimensions(),
                                        texture.info(),
                                        std::move(dawnTexture));
    } else {
        return DawnTexture::MakeWrapped(this->dawnSharedContext(),
                                        texture.dimensions(),
                                        texture.info(),
                                        std::move(dawnTextureView));
    }
}

sk_sp<DawnTexture> DawnResourceProvider::findOrCreateDiscardableMSAALoadTexture(
        SkISize dimensions, const TextureInfo& msaaInfo) {
    SkASSERT(msaaInfo.isValid());

    // Derive the load texture's info from the MSAA texture's info.
    DawnTextureInfo dawnMsaaLoadTextureInfo;
    msaaInfo.getDawnTextureInfo(&dawnMsaaLoadTextureInfo);
    dawnMsaaLoadTextureInfo.fSampleCount = 1;
    dawnMsaaLoadTextureInfo.fUsage |= wgpu::TextureUsage::TextureBinding;

#if !defined(__EMSCRIPTEN__)
    // The MSAA texture can be a transient attachment (memoryless), but the load texture cannot,
    // because the load texture's contents must be retained between the two passes involved in
    // loading:
    // - first pass: the resolve texture is blitted to the load texture.
    // - second pass: the actual render pass is started and the load texture is blitted to the
    //   MSAA texture.
    dawnMsaaLoadTextureInfo.fUsage &= (~wgpu::TextureUsage::TransientAttachment);
#endif

    auto texture = this->findOrCreateDiscardableMSAAAttachment(dimensions, dawnMsaaLoadTextureInfo);

    return sk_sp<DawnTexture>(static_cast<DawnTexture*>(texture.release()));
}

sk_sp<GraphicsPipeline> DawnResourceProvider::createGraphicsPipeline(
        const RuntimeEffectDictionary* runtimeDict,
        const GraphicsPipelineDesc& pipelineDesc,
        const RenderPassDesc& renderPassDesc) {
    return DawnGraphicsPipeline::Make(this->dawnSharedContext(),
                                      this,
                                      runtimeDict,
                                      pipelineDesc,
                                      renderPassDesc);
}

sk_sp<ComputePipeline> DawnResourceProvider::createComputePipeline(
        const ComputePipelineDesc& desc) {
    return DawnComputePipeline::Make(this->dawnSharedContext(), desc);
}

sk_sp<Texture> DawnResourceProvider::createTexture(SkISize dimensions,
                                                   const TextureInfo& info,
                                                   skgpu::Budgeted budgeted) {
    return DawnTexture::Make(this->dawnSharedContext(),
                             dimensions,
                             info,
                             budgeted);
}

sk_sp<Buffer> DawnResourceProvider::createBuffer(size_t size,
                                                 BufferType type,
                                                 AccessPattern accessPattern) {
    return DawnBuffer::Make(this->dawnSharedContext(), size, type, accessPattern);
}

sk_sp<Sampler> DawnResourceProvider::createSampler(const SamplerDesc& samplerDesc) {
    return DawnSampler::Make(this->dawnSharedContext(),
                             samplerDesc.samplingOptions(),
                             samplerDesc.tileModeX(),
                             samplerDesc.tileModeY());
}

BackendTexture DawnResourceProvider::onCreateBackendTexture(SkISize dimensions,
                                                            const TextureInfo& info) {
    wgpu::Texture texture = DawnTexture::MakeDawnTexture(this->dawnSharedContext(),
                                                         dimensions,
                                                         info);
    if (!texture) {
        return {};
    }

    return BackendTexture(texture.MoveToCHandle());
}

void DawnResourceProvider::onDeleteBackendTexture(const BackendTexture& texture) {
    SkASSERT(texture.isValid());
    SkASSERT(texture.backend() == BackendApi::kDawn);

    // Automatically release the pointers in the dtors of wgpu::TextureView and wgpu::Texture.
    // Acquire() won't increment the ref count.
    wgpu::TextureView::Acquire(texture.getDawnTextureViewPtr());
    // We need to explicitly call Destroy() here since that is the recommended way to delete
    // a Dawn texture predictably, versus just dropping a ref and relying on garbage collection.
    //
    // Additionally this helps to work around an issue where Skia may have cached a BindGroup that
    // references the underlying texture. Skia currently doesn't destroy BindGroups when its use of
    // the texture goes away, so a ref to the texture remains on the BindGroup and the memory is
    // never cleaned up unless we call Destroy() here.
    wgpu::Texture::Acquire(texture.getDawnTexturePtr()).Destroy();
}
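
// For illustration only (not part of the original source; variable names are hypothetical).
// The distinction relied on above, per the comments in this file: constructing a wgpu::Texture
// from a raw handle adds a reference, while Acquire() adopts the existing reference so the
// wrapper's destructor releases the ref the BackendTexture was holding. The two lines below
// show the alternatives for the same handle:
//
//     WGPUTexture rawHandle = texture.getDawnTexturePtr();
//     wgpu::Texture reffed  = wgpu::Texture(rawHandle);           // adds a ref; caller keeps its own
//     wgpu::Texture adopted = wgpu::Texture::Acquire(rawHandle);  // no ref added; takes over ownership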

DawnSharedContext* DawnResourceProvider::dawnSharedContext() const {
    return static_cast<DawnSharedContext*>(fSharedContext);
}

sk_sp<DawnBuffer> DawnResourceProvider::findOrCreateDawnBuffer(size_t size,
                                                               BufferType type,
                                                               AccessPattern accessPattern,
                                                               std::string_view label) {
    sk_sp<Buffer> buffer = this->findOrCreateBuffer(size, type, accessPattern, std::move(label));
    DawnBuffer* ptr = static_cast<DawnBuffer*>(buffer.release());
    return sk_sp<DawnBuffer>(ptr);
}

const wgpu::BindGroupLayout& DawnResourceProvider::getOrCreateUniformBuffersBindGroupLayout() {
    if (fUniformBuffersBindGroupLayout) {
        return fUniformBuffersBindGroupLayout;
    }

    std::array<wgpu::BindGroupLayoutEntry, 4> entries;
    entries[0].binding = DawnGraphicsPipeline::kIntrinsicUniformBufferIndex;
    entries[0].visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
    entries[0].buffer.type = wgpu::BufferBindingType::Uniform;
    entries[0].buffer.hasDynamicOffset = true;
    entries[0].buffer.minBindingSize = 0;

    entries[1].binding = DawnGraphicsPipeline::kRenderStepUniformBufferIndex;
    entries[1].visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
    entries[1].buffer.type = fSharedContext->caps()->storageBufferPreferred()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[1].buffer.hasDynamicOffset = true;
    entries[1].buffer.minBindingSize = 0;

    entries[2].binding = DawnGraphicsPipeline::kPaintUniformBufferIndex;
    entries[2].visibility = wgpu::ShaderStage::Fragment;
    entries[2].buffer.type = fSharedContext->caps()->storageBufferPreferred()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[2].buffer.hasDynamicOffset = true;
    entries[2].buffer.minBindingSize = 0;

    // The gradient buffer is only used when storage buffers are preferred; otherwise large
    // gradients fall back to a texture. Declare the binding as a uniform buffer when it is
    // not in use to satisfy the binding-type restrictions of devices without SSBO support.
    entries[3].binding = DawnGraphicsPipeline::kGradientBufferIndex;
    entries[3].visibility = wgpu::ShaderStage::Fragment;
    entries[3].buffer.type = fSharedContext->caps()->storageBufferPreferred()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[3].buffer.hasDynamicOffset = true;
    entries[3].buffer.minBindingSize = 0;

    wgpu::BindGroupLayoutDescriptor groupLayoutDesc;
    if (fSharedContext->caps()->setBackendLabels()) {
        groupLayoutDesc.label = "Uniform buffers bind group layout";
    }

    groupLayoutDesc.entryCount = entries.size();
    groupLayoutDesc.entries = entries.data();
    fUniformBuffersBindGroupLayout =
            this->dawnSharedContext()->device().CreateBindGroupLayout(&groupLayoutDesc);

    return fUniformBuffersBindGroupLayout;
}

const wgpu::BindGroupLayout&
DawnResourceProvider::getOrCreateSingleTextureSamplerBindGroupLayout() {
    if (fSingleTextureSamplerBindGroupLayout) {
        return fSingleTextureSamplerBindGroupLayout;
    }

    std::array<wgpu::BindGroupLayoutEntry, 2> entries;

    entries[0].binding = 0;
    entries[0].visibility = wgpu::ShaderStage::Fragment;
    entries[0].sampler.type = wgpu::SamplerBindingType::Filtering;

    entries[1].binding = 1;
    entries[1].visibility = wgpu::ShaderStage::Fragment;
    entries[1].texture.sampleType = wgpu::TextureSampleType::Float;
    entries[1].texture.viewDimension = wgpu::TextureViewDimension::e2D;
    entries[1].texture.multisampled = false;

    wgpu::BindGroupLayoutDescriptor groupLayoutDesc;
    if (fSharedContext->caps()->setBackendLabels()) {
        groupLayoutDesc.label = "Single texture + sampler bind group layout";
    }

    groupLayoutDesc.entryCount = entries.size();
    groupLayoutDesc.entries = entries.data();
    fSingleTextureSamplerBindGroupLayout =
            this->dawnSharedContext()->device().CreateBindGroupLayout(&groupLayoutDesc);

    return fSingleTextureSamplerBindGroupLayout;
}

const wgpu::Buffer& DawnResourceProvider::getOrCreateNullBuffer() {
    if (!fNullBuffer) {
        wgpu::BufferDescriptor desc;
        if (fSharedContext->caps()->setBackendLabels()) {
            desc.label = "UnusedBufferSlot";
        }
        desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform |
                     wgpu::BufferUsage::Storage;
        desc.size = kBufferBindingSizeAlignment;
        desc.mappedAtCreation = false;

        fNullBuffer = this->dawnSharedContext()->device().CreateBuffer(&desc);
        SkASSERT(fNullBuffer);
    }

    return fNullBuffer;
}

const sk_sp<DawnBuffer>& DawnResourceProvider::getOrCreateIntrinsicConstantBuffer() {
    if (!fIntrinsicConstantBuffer) {
        // The BufferType and AccessPattern arguments below are an assumption (kUniform,
        // kGpuOnly); the original arguments were lost from the listing.
        fIntrinsicConstantBuffer = findOrCreateDawnBuffer(sizeof(float[4]),
                                                          BufferType::kUniform,
                                                          AccessPattern::kGpuOnly,
                                                          "IntrinsicConstantBuffer");
        SkASSERT(fIntrinsicConstantBuffer);
    }

    return fIntrinsicConstantBuffer;
}

const wgpu::BindGroup& DawnResourceProvider::findOrCreateUniformBuffersBindGroup(
        const std::array<std::pair<const DawnBuffer*, uint32_t>, 4>& boundBuffersAndSizes) {
    auto key = make_ubo_bind_group_key(boundBuffersAndSizes);
    auto* existingBindGroup = fUniformBufferBindGroupCache.find(key);
    if (existingBindGroup) {
        // Cache hit.
        return *existingBindGroup;
    }

    // Translate to a wgpu::BindGroupDescriptor.
    std::array<wgpu::BindGroupEntry, 4> entries;

    constexpr uint32_t kBindingIndices[] = {
            DawnGraphicsPipeline::kIntrinsicUniformBufferIndex,
            DawnGraphicsPipeline::kRenderStepUniformBufferIndex,
            DawnGraphicsPipeline::kPaintUniformBufferIndex,
            DawnGraphicsPipeline::kGradientBufferIndex,
    };

    for (uint32_t i = 0; i < boundBuffersAndSizes.size(); ++i) {
        const DawnBuffer* boundBuffer = boundBuffersAndSizes[i].first;
        const uint32_t bindingSize = boundBuffersAndSizes[i].second;

        entries[i].binding = kBindingIndices[i];
        entries[i].offset = 0;
        if (boundBuffer) {
            entries[i].buffer = boundBuffer->dawnBuffer();
            entries[i].size = SkAlignTo(bindingSize, kBufferBindingSizeAlignment);
        } else {
            entries[i].buffer = this->getOrCreateNullBuffer();
            entries[i].size = wgpu::kWholeSize;
        }
    }

    wgpu::BindGroupDescriptor desc;
    desc.layout = this->getOrCreateUniformBuffersBindGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    const auto& device = this->dawnSharedContext()->device();
    auto bindGroup = device.CreateBindGroup(&desc);

    return *fUniformBufferBindGroupCache.insert(key, bindGroup);
}
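
// Illustrative usage sketch (not from the original source; the variable names and the group
// index 0 are assumptions). Because every buffer entry in the layout above is declared with
// hasDynamicOffset = true, a caller binds this cached group once and supplies one dynamic
// offset per slot at draw time:
//
//     const wgpu::BindGroup& group =
//             resourceProvider->findOrCreateUniformBuffersBindGroup(boundBuffersAndSizes);
//     uint32_t dynamicOffsets[4] = {intrinsicOffset, renderStepOffset, paintOffset, gradientOffset};
//     pass.SetBindGroup(/*groupIndex=*/0, group, std::size(dynamicOffsets), dynamicOffsets);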

const wgpu::BindGroup& DawnResourceProvider::findOrCreateSingleTextureSamplerBindGroup(
        const DawnSampler* sampler, const DawnTexture* texture) {
    auto key = make_texture_bind_group_key(sampler, texture);
    auto* existingBindGroup = fSingleTextureSamplerBindGroups.find(key);
    if (existingBindGroup) {
        // Cache hit.
        return *existingBindGroup;
    }

    std::array<wgpu::BindGroupEntry, 2> entries;

    entries[0].binding = 0;
    entries[0].sampler = sampler->dawnSampler();
    entries[1].binding = 1;
    entries[1].textureView = texture->sampleTextureView();

    wgpu::BindGroupDescriptor desc;
    desc.layout = this->getOrCreateSingleTextureSamplerBindGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    const auto& device = this->dawnSharedContext()->device();
    auto bindGroup = device.CreateBindGroup(&desc);

    return *fSingleTextureSamplerBindGroups.insert(key, bindGroup);
}

}  // namespace skgpu::graphite