Flutter Engine
VelloRenderer.cpp
/*
 * Copyright 2023 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkPath.h"
#include "src/core/SkGeometry.h"
#include "src/core/SkPathPriv.h"

#include <algorithm>

namespace skgpu::graphite {
namespace {

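// Helpers for wrapping Graphite buffer allocations in the BufferView shape that the dispatch
// builder below consumes. A failed allocation yields a null BindBufferInfo, which collapses the
// view to zero length.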
BufferView new_scratch_slice(ScratchBuffer& scratch) {
    size_t size = scratch.size();  // Use the whole buffer.
    BindBufferInfo info = scratch.suballocate(size);
    return {info, info ? size : 0};
}

BufferView new_indirect_slice(DrawBufferManager* mgr, size_t size) {
    BindBufferInfo info = mgr->getIndirectStorage(size, ClearBuffer::kYes);
    return {info, info ? size : 0};
}

::rust::Slice<uint8_t> to_slice(void* ptr, size_t size) {
    return {static_cast<uint8_t*>(ptr), size};
}

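// SkMatrix stores its values in row-major order (scaleX, skewX, transX, skewY, scaleY, transY,
// ...), while Vello's Affine expects the six 2x3 coefficients in column-major order, hence the
// index shuffle below.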
vello_cpp::Affine to_vello_affine(const SkMatrix& m) {
    // Vello currently doesn't support perspective scaling and the encoding only accepts a 2x3
    // affine transform matrix.
    return {{m.get(0), m.get(3), m.get(1), m.get(4), m.get(2), m.get(5)}};
}

vello_cpp::Point to_vello_point(const SkPoint& p) { return {p.x(), p.y()}; }

vello_cpp::Color to_vello_color(const SkColor4f& color) {
    SkColor c = color.toSkColor();
    return {
            static_cast<uint8_t>(SkColorGetR(c)),
            static_cast<uint8_t>(SkColorGetG(c)),
            static_cast<uint8_t>(SkColorGetB(c)),
            static_cast<uint8_t>(SkColorGetA(c)),
    };
}

WorkgroupSize to_wg_size(const vello_cpp::WorkgroupSize& src) {
    return WorkgroupSize(src.x, src.y, src.z);
}

vello_cpp::Fill to_fill_type(SkPathFillType fillType) {
    // Vello does not provide an encoding for inverse fill types. When Skia uses vello to render
    // a coverage mask for an inverse fill, it encodes a regular fill and inverts the coverage
    // value after sampling the mask.
    switch (fillType) {
        case SkPathFillType::kWinding:
        case SkPathFillType::kInverseWinding:
            return vello_cpp::Fill::NonZero;
        case SkPathFillType::kEvenOdd:
        case SkPathFillType::kInverseEvenOdd:
            return vello_cpp::Fill::EvenOdd;
    }
    return vello_cpp::Fill::NonZero;
}

vello_cpp::CapStyle to_cap_style(SkPaint::Cap cap) {
    switch (cap) {
        case SkPaint::Cap::kButt_Cap:
            return vello_cpp::CapStyle::Butt;
        case SkPaint::Cap::kRound_Cap:
            return vello_cpp::CapStyle::Round;
        case SkPaint::Cap::kSquare_Cap:
            return vello_cpp::CapStyle::Square;
    }
    SkUNREACHABLE;
}

vello_cpp::JoinStyle to_join_style(SkPaint::Join join) {
    switch (join) {
        case SkPaint::Join::kMiter_Join:
            return vello_cpp::JoinStyle::Miter;
        case SkPaint::Join::kBevel_Join:
            return vello_cpp::JoinStyle::Bevel;
        case SkPaint::Join::kRound_Join:
            return vello_cpp::JoinStyle::Round;
    }
    SkUNREACHABLE;
}

vello_cpp::Stroke to_stroke(const SkStrokeRec& style) {
    return vello_cpp::Stroke{
            /*width=*/style.getWidth(),
            /*miter_limit=*/style.getMiter(),
            /*cap*/ to_cap_style(style.getCap()),
            /*join*/ to_join_style(style.getJoin()),
    };
}

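// Adapts Skia's path iteration to the vello_cpp::PathIterator interface. Conic segments have no
// Vello equivalent, so they are expanded into quadratic Béziers as they are visited.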
class PathIter : public vello_cpp::PathIterator {
public:
    PathIter(const SkPath& path, const Transform& t)
            : fIterate(path), fIter(fIterate.begin()), fTransform(t) {}

    bool next_element(vello_cpp::PathElement* outElem) override {
        if (fConicQuadIdx < fConicConverter.countQuads()) {
            SkASSERT(fConicQuads != nullptr);
            outElem->verb = vello_cpp::PathVerb::QuadTo;
            int pointIdx = fConicQuadIdx * 2;
            outElem->points[0] = to_vello_point(fConicQuads[pointIdx]);
            outElem->points[1] = to_vello_point(fConicQuads[pointIdx + 1]);
            outElem->points[2] = to_vello_point(fConicQuads[pointIdx + 2]);
            fConicQuadIdx++;
            return true;
        }

        if (fIter == fIterate.end()) {
            return false;
        }

        SkASSERT(outElem);
        auto [verb, points, weights] = *fIter;
        fIter++;

        switch (verb) {
            case SkPathVerb::kMove:
                outElem->verb = vello_cpp::PathVerb::MoveTo;
                outElem->points[0] = to_vello_point(points[0]);
                break;
            case SkPathVerb::kLine:
                outElem->verb = vello_cpp::PathVerb::LineTo;
                outElem->points[0] = to_vello_point(points[0]);
                outElem->points[1] = to_vello_point(points[1]);
                break;
            case SkPathVerb::kConic:
                // The vello encoding API doesn't handle conic sections. Approximate it with
                // quadratic Béziers.
                SkASSERT(fConicQuadIdx >= fConicConverter.countQuads());  // No other conic->quad
                                                                          // conversions should be
                                                                          // in progress
                fConicQuads = fConicConverter.computeQuads(
                        points, *weights, 0.25 / fTransform.maxScaleFactor());
                outElem->verb = vello_cpp::PathVerb::QuadTo;
                outElem->points[0] = to_vello_point(fConicQuads[0]);
                outElem->points[1] = to_vello_point(fConicQuads[1]);
                outElem->points[2] = to_vello_point(fConicQuads[2]);

                // The next call to `next_element` will yield the next quad in the list (at
                // index 1) if `fConicConverter` contains more than 1 quad.
                fConicQuadIdx = 1;
                break;
            case SkPathVerb::kQuad:
                outElem->verb = vello_cpp::PathVerb::QuadTo;
                outElem->points[0] = to_vello_point(points[0]);
                outElem->points[1] = to_vello_point(points[1]);
                outElem->points[2] = to_vello_point(points[2]);
                break;
            case SkPathVerb::kCubic:
                outElem->verb = vello_cpp::PathVerb::CurveTo;
                outElem->points[0] = to_vello_point(points[0]);
                outElem->points[1] = to_vello_point(points[1]);
                outElem->points[2] = to_vello_point(points[2]);
                outElem->points[3] = to_vello_point(points[3]);
                break;
            case SkPathVerb::kClose:
                outElem->verb = vello_cpp::PathVerb::Close;
                break;
        }

        return true;
    }

private:
    SkPathPriv::Iterate fIterate;
    SkPathPriv::RangeIter fIter;

    // Variables used to track conic to quadratic spline conversion. `fTransform` is used to
    // determine the subpixel error tolerance in device coordinate space.
    const Transform& fTransform;
    SkAutoConicToQuads fConicConverter;
    const SkPoint* fConicQuads = nullptr;
    int fConicQuadIdx = 0;
};

}  // namespace

VelloScene::VelloScene() : fEncoding(vello_cpp::new_encoding()) {}

void VelloScene::reset() {
    fEncoding->reset();
}

void VelloScene::solidFill(const SkPath& shape,
                           const SkColor4f& fillColor,
                           const SkPathFillType fillType,
                           const Transform& t) {
    PathIter iter(shape, t);
    fEncoding->fill(to_fill_type(fillType),
                    to_vello_affine(t),
                    {vello_cpp::BrushKind::Solid, {to_vello_color(fillColor)}},
                    iter);
}

void VelloScene::solidStroke(const SkPath& shape,
                             const SkColor4f& fillColor,
                             const SkStrokeRec& style,
                             const Transform& t) {
    // TODO: Obtain the dashing pattern here and let Vello handle dashing on the CPU while
    // encoding the path?
    PathIter iter(shape, t);
    vello_cpp::Brush brush{vello_cpp::BrushKind::Solid, {to_vello_color(fillColor)}};
    fEncoding->stroke(to_stroke(style), to_vello_affine(t), brush, iter);
}

void VelloScene::pushClipLayer(const SkPath& shape, const Transform& t) {
    PathIter iter(shape, t);
    fEncoding->begin_clip(to_vello_affine(t), iter);
    SkDEBUGCODE(fLayers++;)
}

void VelloScene::popClipLayer() {
    SkASSERT(fLayers > 0);
    fEncoding->end_clip();
    SkDEBUGCODE(fLayers--;)
}

void VelloScene::append(const VelloScene& other) {
    fEncoding->append(*other.fEncoding);
}

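// Select the fine-rasterization step variants whose output matches the coverage-mask target
// format reported by the caps (the Alpha8 variants when the target is kAlpha_8_SkColorType).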
VelloRenderer::VelloRenderer(const Caps* caps) {
    if (ComputeShaderCoverageMaskTargetFormat(caps) == kAlpha_8_SkColorType) {
        fFineArea = std::make_unique<VelloFineAreaAlpha8Step>();
        fFineMsaa16 = std::make_unique<VelloFineMsaa16Alpha8Step>();
        fFineMsaa8 = std::make_unique<VelloFineMsaa8Alpha8Step>();
    } else {
        fFineArea = std::make_unique<VelloFineAreaStep>();
        fFineMsaa16 = std::make_unique<VelloFineMsaa16Step>();
        fFineMsaa8 = std::make_unique<VelloFineMsaa8Step>();
    }
}

std::unique_ptr<DispatchGroup> VelloRenderer::renderScene(const RenderParams& params,
                                                          const VelloScene& scene,
                                                          sk_sp<TextureProxy> target,
                                                          Recorder* recorder) const {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    if (scene.fEncoding->is_empty()) {
        return nullptr;
    }

    if (params.fWidth == 0 || params.fHeight == 0) {
        return nullptr;
    }

    // TODO: validate that the pixel format matches the pipeline layout.
    // Clamp the draw region to the target texture dimensions.
    const SkISize dims = target->dimensions();
    if (dims.isEmpty() || dims.fWidth < 0 || dims.fHeight < 0) {
        SKGPU_LOG_W("VelloRenderer: cannot render to an empty target");
        return nullptr;
    }

    SkASSERT(scene.fLayers == 0);  // Begin/end clips must be matched.
    auto config = scene.fEncoding->prepare_render(
            std::min(params.fWidth, static_cast<uint32_t>(dims.fWidth)),
            std::min(params.fHeight, static_cast<uint32_t>(dims.fHeight)),
            to_vello_color(params.fBaseColor));
    auto dispatchInfo = config->workgroup_counts();
    auto bufferSizes = config->buffer_sizes();

    DispatchGroup::Builder builder(recorder);

    // In total there are 25 resources that are used across the full pipeline stages. The sizes
    // of these resources depend on the encoded scene. We allocate all of them and assign them
    // directly to the builder here instead of delegating the logic to the ComputeSteps.
    DrawBufferManager* bufMgr = recorder->priv().drawBufferManager();
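    // The config uniform and scene buffers below are written directly on the CPU via the
    // config->write_*() calls; everything after them is scratch or indirect storage that the
    // compute stages populate on the GPU.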

    size_t uboSize = config->config_uniform_buffer_size();
    auto [uboPtr, configBuf] = bufMgr->getUniformPointer(uboSize);
    if (!uboPtr || !config->write_config_uniform_buffer(to_slice(uboPtr, uboSize))) {
        return nullptr;
    }

    size_t sceneSize = config->scene_buffer_size();
    auto [scenePtr, sceneBuf] = bufMgr->getStoragePointer(sceneSize);
    if (!scenePtr || !config->write_scene_buffer(to_slice(scenePtr, sceneSize))) {
        return nullptr;
    }

    // TODO(b/285189802): The default sizes for the bump buffers (~97MB) exceed Graphite's
    // resource budget if multiple passes are necessary per frame (250MB, see ResourceCache.h).
    // We apply a crude size reduction here which seems to be enough for a 4k x 4k atlas render
    // for the GMs that we have tested. The numbers below are able to render GM_longpathdash
    // with CPU-side stroke expansion.
    //
    // We need to come up with a better approach to accurately predict the sizes for these
    // buffers based on the scene encoding and our resource budget. It should be possible to
    // build a conservative estimate using the total number of path verbs, some heuristic based
    // on the verb and the path's transform, and the total number of tiles.
    //
    // The following numbers amount to ~48MB.
    const size_t lines_size = bufferSizes.lines;
    const size_t bin_data_size = bufferSizes.bin_data;
    const size_t tiles_size = bufferSizes.tiles;
    const size_t segments_size = bufferSizes.segments;
    const size_t seg_counts_size = bufferSizes.seg_counts;
    const size_t ptcl_size = bufferSizes.ptcl;

    // See the comments in VelloComputeSteps.h for an explanation of the logic here.

    builder.assignSharedBuffer({configBuf, uboSize}, kVelloSlot_ConfigUniform);
    builder.assignSharedBuffer({sceneBuf, sceneSize}, kVelloSlot_Scene);

    // Buffers get cleared ahead of the entire DispatchGroup. Allocate the bump buffer early to
    // avoid a potentially recycled (and prematurely cleared) scratch buffer.
    ScratchBuffer bump = bufMgr->getScratchStorage(bufferSizes.bump_alloc);
    builder.assignSharedBuffer(new_scratch_slice(bump), kVelloSlot_BumpAlloc, ClearBuffer::kYes);

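    // The first stages compute a reduce + scan over the encoded path tags; the resulting tag
    // monoid buffer is bound via kVelloSlot_TagMonoid and consumed through the flatten stage.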
    // path_reduce
    ScratchBuffer tagmonoids = bufMgr->getScratchStorage(bufferSizes.path_monoids);
    {
        // This can be immediately returned after input processing.
        ScratchBuffer pathtagReduceOutput = bufMgr->getScratchStorage(bufferSizes.path_reduced);
        builder.assignSharedBuffer(new_scratch_slice(pathtagReduceOutput),
                                   kVelloSlot_PathtagReduceOutput);
        builder.assignSharedBuffer(new_scratch_slice(tagmonoids), kVelloSlot_TagMonoid);
        builder.appendStep(&fPathtagReduce, to_wg_size(dispatchInfo.path_reduce));

        // If the input is too large to be fully processed by a single workgroup then a second
        // reduce step and two scan steps are necessary. Otherwise one reduce+scan pair is
        // sufficient.
        //
        // In either case, the result is `tagmonoids`.
        if (dispatchInfo.use_large_path_scan) {
            ScratchBuffer reduced2 = bufMgr->getScratchStorage(bufferSizes.path_reduced2);
            ScratchBuffer reducedScan = bufMgr->getScratchStorage(bufferSizes.path_reduced_scan);

            builder.assignSharedBuffer(new_scratch_slice(reduced2),
                                       kVelloSlot_LargePathtagReduceSecondPassOutput);
            builder.assignSharedBuffer(new_scratch_slice(reducedScan),
                                       kVelloSlot_LargePathtagScanFirstPassOutput);

            builder.appendStep(&fPathtagReduce2, to_wg_size(dispatchInfo.path_reduce2));
            builder.appendStep(&fPathtagScan1, to_wg_size(dispatchInfo.path_scan1));
            builder.appendStep(&fPathtagScanLarge, to_wg_size(dispatchInfo.path_scan));
        } else {
            builder.appendStep(&fPathtagScanSmall, to_wg_size(dispatchInfo.path_scan));
        }
    }

    // bbox_clear
    ScratchBuffer pathBboxes = bufMgr->getScratchStorage(bufferSizes.path_bboxes);
    builder.assignSharedBuffer(new_scratch_slice(pathBboxes), kVelloSlot_PathBBoxes);
    builder.appendStep(&fBboxClear, to_wg_size(dispatchInfo.bbox_clear));

    // flatten
    ScratchBuffer lines = bufMgr->getScratchStorage(lines_size);
    builder.assignSharedBuffer(new_scratch_slice(lines), kVelloSlot_Lines);
    builder.appendStep(&fFlatten, to_wg_size(dispatchInfo.flatten));

    tagmonoids.returnToPool();

    // draw_reduce
    ScratchBuffer drawReduced = bufMgr->getScratchStorage(bufferSizes.draw_reduced);
    builder.assignSharedBuffer(new_scratch_slice(drawReduced), kVelloSlot_DrawReduceOutput);
    builder.appendStep(&fDrawReduce, to_wg_size(dispatchInfo.draw_reduce));

    // draw_leaf
    ScratchBuffer drawMonoids = bufMgr->getScratchStorage(bufferSizes.draw_monoids);
    ScratchBuffer binData = bufMgr->getScratchStorage(bin_data_size);
    // A clip input buffer must still get bound even if the encoding doesn't contain any clips.
    ScratchBuffer clipInput = bufMgr->getScratchStorage(bufferSizes.clip_inps);
    builder.assignSharedBuffer(new_scratch_slice(drawMonoids), kVelloSlot_DrawMonoid);
    builder.assignSharedBuffer(new_scratch_slice(binData), kVelloSlot_InfoBinData);
    builder.assignSharedBuffer(new_scratch_slice(clipInput), kVelloSlot_ClipInput);
    builder.appendStep(&fDrawLeaf, to_wg_size(dispatchInfo.draw_leaf));

    drawReduced.returnToPool();

    // clip_reduce, clip_leaf
    // The clip bbox buffer is always an input to the binning stage, even when the encoding
    // doesn't contain any clips.
    ScratchBuffer clipBboxes = bufMgr->getScratchStorage(bufferSizes.clip_bboxes);
    builder.assignSharedBuffer(new_scratch_slice(clipBboxes), kVelloSlot_ClipBBoxes);
    WorkgroupSize clipReduceWgCount = to_wg_size(dispatchInfo.clip_reduce);
    WorkgroupSize clipLeafWgCount = to_wg_size(dispatchInfo.clip_leaf);
    bool doClipReduce = clipReduceWgCount.scalarSize() > 0u;
    bool doClipLeaf = clipLeafWgCount.scalarSize() > 0u;
    if (doClipReduce || doClipLeaf) {
        ScratchBuffer clipBic = bufMgr->getScratchStorage(bufferSizes.clip_bics);
        ScratchBuffer clipEls = bufMgr->getScratchStorage(bufferSizes.clip_els);
        builder.assignSharedBuffer(new_scratch_slice(clipBic), kVelloSlot_ClipBicyclic);
        builder.assignSharedBuffer(new_scratch_slice(clipEls), kVelloSlot_ClipElement);
        if (doClipReduce) {
            builder.appendStep(&fClipReduce, clipReduceWgCount);
        }
        if (doClipLeaf) {
            builder.appendStep(&fClipLeaf, clipLeafWgCount);
        }
    }

    clipInput.returnToPool();

    // binning
    ScratchBuffer drawBboxes = bufMgr->getScratchStorage(bufferSizes.draw_bboxes);
    ScratchBuffer binHeaders = bufMgr->getScratchStorage(bufferSizes.bin_headers);
    builder.assignSharedBuffer(new_scratch_slice(drawBboxes), kVelloSlot_DrawBBoxes);
    builder.assignSharedBuffer(new_scratch_slice(binHeaders), kVelloSlot_BinHeader);
    builder.appendStep(&fBinning, to_wg_size(dispatchInfo.binning));

    pathBboxes.returnToPool();
    clipBboxes.returnToPool();

    // tile_alloc
    ScratchBuffer paths = bufMgr->getScratchStorage(bufferSizes.paths);
    ScratchBuffer tiles = bufMgr->getScratchStorage(tiles_size);
    builder.assignSharedBuffer(new_scratch_slice(paths), kVelloSlot_Path);
    builder.assignSharedBuffer(new_scratch_slice(tiles), kVelloSlot_Tile);
    builder.appendStep(&fTileAlloc, to_wg_size(dispatchInfo.tile_alloc));

    drawBboxes.returnToPool();

    // path_count_setup
    auto indirectCountBuffer = new_indirect_slice(bufMgr, bufferSizes.indirect_count);
    builder.assignSharedBuffer(indirectCountBuffer, kVelloSlot_IndirectCount);
    builder.appendStep(&fPathCountSetup, to_wg_size(dispatchInfo.path_count_setup));
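    // path_count_setup fills the indirect count buffer on the GPU; that buffer then supplies the
    // dispatch sizes for the indirectly dispatched path_count and path_tiling stages below.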

    // Rasterization stage scratch buffers.
    ScratchBuffer seg_counts = bufMgr->getScratchStorage(seg_counts_size);
    ScratchBuffer segments = bufMgr->getScratchStorage(segments_size);
    ScratchBuffer ptcl = bufMgr->getScratchStorage(ptcl_size);

    // path_count
    builder.assignSharedBuffer(new_scratch_slice(seg_counts), kVelloSlot_SegmentCounts);
    builder.appendStepIndirect(&fPathCount, indirectCountBuffer);

    // backdrop
    builder.appendStep(&fBackdrop, to_wg_size(dispatchInfo.backdrop));

    // coarse
    builder.assignSharedBuffer(new_scratch_slice(ptcl), kVelloSlot_PTCL);
    builder.appendStep(&fCoarse, to_wg_size(dispatchInfo.coarse));

    // path_tiling_setup
    builder.appendStep(&fPathTilingSetup, to_wg_size(dispatchInfo.path_tiling_setup));

    // path_tiling
    builder.assignSharedBuffer(new_scratch_slice(segments), kVelloSlot_Segments);
    builder.appendStepIndirect(&fPathTiling, indirectCountBuffer);

    // fine
    builder.assignSharedTexture(std::move(target), kVelloSlot_OutputImage);
    const ComputeStep* fineVariant = nullptr;
    switch (params.fAaConfig) {
        case VelloAaConfig::kAnalyticArea:
            fineVariant = fFineArea.get();
            break;
        case VelloAaConfig::kMSAA16:
            fineVariant = fFineMsaa16.get();
            break;
        case VelloAaConfig::kMSAA8:
            fineVariant = fFineMsaa8.get();
            break;
    }
    SkASSERT(fineVariant != nullptr);
    builder.appendStep(fineVariant, to_wg_size(dispatchInfo.fine));

    return builder.finalize();
}

}  // namespace skgpu::graphite
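For orientation, here is a minimal usage sketch (not part of this file) of how a caller might record a scene and build the compute dispatch group with the API above. The helper name renderMask, the white fill color, and the VelloAaConfig::kAnalyticArea value are illustrative assumptions; the Recorder, TextureProxy, path, and transform are expected to be supplied by the surrounding Graphite code.

std::unique_ptr<skgpu::graphite::DispatchGroup> renderMask(
        const skgpu::graphite::VelloRenderer& renderer,
        skgpu::graphite::Recorder* recorder,
        sk_sp<skgpu::graphite::TextureProxy> maskTexture,
        const SkPath& devicePath,
        const skgpu::graphite::Transform& localToDevice) {
    using namespace skgpu::graphite;

    // Record the path as a solid fill into a fresh scene.
    VelloScene scene;
    scene.solidFill(devicePath, SkColors::kWhite, SkPathFillType::kWinding, localToDevice);

    // Fields as read by renderScene() above; kAnalyticArea is an assumed enum value.
    RenderParams params;
    params.fWidth = static_cast<uint32_t>(maskTexture->dimensions().width());
    params.fHeight = static_cast<uint32_t>(maskTexture->dimensions().height());
    params.fBaseColor = SkColors::kTransparent;
    params.fAaConfig = VelloAaConfig::kAnalyticArea;

    // Returns nullptr for an empty scene or a degenerate render target.
    return renderer.renderScene(params, scene, std::move(maskTexture), recorder);
}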