AAConvexPathRenderer.cpp
1/*
2 * Copyright 2012 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "src/gpu/ganesh/ops/AAConvexPathRenderer.h"
9
12#include "src/core/SkGeometry.h"
14#include "src/core/SkPathPriv.h"
17#include "src/gpu/KeyBuilder.h"
35
36using namespace skia_private;
37
38namespace skgpu::ganesh {
39
40namespace {
41
42struct Segment {
43 enum {
44 // These enum values are assumed in member functions below.
45 kLine = 0,
46 kQuad = 1,
47 } fType;
48
49 // line uses one pt, quad uses 2 pts
50 SkPoint fPts[2];
51 // normal to edge ending at each pt
52 SkVector fNorms[2];
53 // is the corner where the previous segment meets this segment
54 // sharp. If so, fMid is a normalized bisector facing outward.
55 SkVector fMid;
56
57 int countPoints() {
58 static_assert(0 == kLine && 1 == kQuad);
59 return fType + 1;
60 }
61 const SkPoint& endPt() const {
62 static_assert(0 == kLine && 1 == kQuad);
63 return fPts[fType];
64 }
65 const SkPoint& endNorm() const {
66 static_assert(0 == kLine && 1 == kQuad);
67 return fNorms[fType];
68 }
69};
70
71typedef TArray<Segment, true> SegmentArray;
72
73bool center_of_mass(const SegmentArray& segments, SkPoint* c) {
74 SkScalar area = 0;
75 SkPoint center = {0, 0};
76 int count = segments.size();
77 if (count <= 0) {
78 return false;
79 }
80 SkPoint p0 = {0, 0};
81 if (count > 2) {
82 // We translate the polygon so that the first point is at the origin.
83 // This avoids some precision issues with small area polygons far away
84 // from the origin.
85 p0 = segments[0].endPt();
86 SkPoint pi;
87 SkPoint pj;
88 // the first and last iteration of the below loop would compute
89 // zeros since the starting / ending point is (0,0). So instead we start
90 // at i=1 and make the last iteration i=count-2.
91 pj = segments[1].endPt() - p0;
92 for (int i = 1; i < count - 1; ++i) {
93 pi = pj;
94 pj = segments[i + 1].endPt() - p0;
95
96 SkScalar t = SkPoint::CrossProduct(pi, pj);
97 area += t;
98 center.fX += (pi.fX + pj.fX) * t;
99 center.fY += (pi.fY + pj.fY) * t;
100 }
101 }
102
103 // If the poly has no area then we instead return the average of
104 // its points.
105 if (SkScalarNearlyZero(area)) {
106 SkPoint avg;
107 avg.set(0, 0);
108 for (int i = 0; i < count; ++i) {
109 const SkPoint& pt = segments[i].endPt();
110 avg.fX += pt.fX;
111 avg.fY += pt.fY;
112 }
113 SkScalar denom = SK_Scalar1 / count;
114 avg.scale(denom);
115 *c = avg;
116 } else {
117 area *= 3;
118 area = SkScalarInvert(area);
119 center.scale(area);
120 // undo the translate of p0 to the origin.
121 *c = center + p0;
122 }
123 return !SkIsNaN(c->fX) && !SkIsNaN(c->fY) && c->isFinite();
124}
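A minimal standalone sketch of the same shoelace/centroid accumulation, using a plain stand-in point type rather than SkPoint (the names Pt and polygon_centroid below are illustrative, not Skia APIs). It makes the scaling step above concrete: the loop accumulates 2A in area and sum((pi + pj) * cross(pi, pj)) in center, so dividing by 3 * (2A) = 6A yields the centroid.

struct Pt { float x, y; };

// Centroid of a simple closed polygon q[0..n-1] via the shoelace formula.
// Assumes non-zero area (the code above falls back to averaging the points).
static Pt polygon_centroid(const Pt* q, int n) {
    float area2 = 0;                            // accumulates 2 * signed area
    float cx = 0, cy = 0;
    for (int i = 0; i < n; ++i) {
        const Pt& a = q[i];
        const Pt& b = q[(i + 1) % n];
        float t = a.x * b.y - a.y * b.x;        // cross(a, b)
        area2 += t;
        cx += (a.x + b.x) * t;
        cy += (a.y + b.y) * t;
    }
    float s = 1.0f / (3.0f * area2);            // 1 / (6 * area)
    return {cx * s, cy * s};
}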
125
126bool compute_vectors(SegmentArray* segments,
127 SkPoint* fanPt,
128 SkPathFirstDirection dir,
129 int* vCount,
130 int* iCount) {
131 if (!center_of_mass(*segments, fanPt)) {
132 return false;
133 }
134 int count = segments->size();
135
136 // Make the normals point towards the outside
137 SkPointPriv::Side normSide;
138 if (dir == SkPathFirstDirection::kCCW) {
139 normSide = SkPointPriv::kRight_Side;
140 } else {
141 normSide = SkPointPriv::kLeft_Side;
142 }
143
144 int64_t vCount64 = 0;
145 int64_t iCount64 = 0;
146 // compute normals at all points
147 for (int a = 0; a < count; ++a) {
148 Segment& sega = (*segments)[a];
149 int b = (a + 1) % count;
150 Segment& segb = (*segments)[b];
151
152 const SkPoint* prevPt = &sega.endPt();
153 int n = segb.countPoints();
154 for (int p = 0; p < n; ++p) {
155 segb.fNorms[p] = segb.fPts[p] - *prevPt;
156 segb.fNorms[p].normalize();
157 segb.fNorms[p] = SkPointPriv::MakeOrthog(segb.fNorms[p], normSide);
158 prevPt = &segb.fPts[p];
159 }
160 if (Segment::kLine == segb.fType) {
161 vCount64 += 5;
162 iCount64 += 9;
163 } else {
164 vCount64 += 6;
165 iCount64 += 12;
166 }
167 }
168
169 // compute mid-vectors where segments meet. TODO: Detect shallow corners
170 // and leave out the wedges and close gaps by stitching segments together.
171 for (int a = 0; a < count; ++a) {
172 const Segment& sega = (*segments)[a];
173 int b = (a + 1) % count;
174 Segment& segb = (*segments)[b];
175 segb.fMid = segb.fNorms[0] + sega.endNorm();
176 segb.fMid.normalize();
177 // corner wedges
178 vCount64 += 4;
179 iCount64 += 6;
180 }
181 if (vCount64 > SK_MaxS32 || iCount64 > SK_MaxS32) {
182 return false;
183 }
184 *vCount = vCount64;
185 *iCount = iCount64;
186 return true;
187}
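The per-segment budget accumulated into vCount64/iCount64 above can be summarized as a few constants; a small sketch (names are illustrative only): every corner adds a 4-vertex/6-index wedge, a line edge adds 5 vertices and 9 indices, and a quad edge adds 6 vertices and 12 indices.

struct SegmentBudget { int verts; int indices; };

constexpr SegmentBudget kCornerWedge = {4, 6};
constexpr SegmentBudget kLineEdge    = {5, 9};
constexpr SegmentBudget kQuadEdge    = {6, 12};

// Worst-case totals for a convex outline with nLine line segments and nQuad quad segments.
constexpr SegmentBudget total_budget(int nLine, int nQuad) {
    int segs = nLine + nQuad;   // one corner wedge per segment
    return {nLine * kLineEdge.verts   + nQuad * kQuadEdge.verts   + segs * kCornerWedge.verts,
            nLine * kLineEdge.indices + nQuad * kQuadEdge.indices + segs * kCornerWedge.indices};
}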
188
189struct DegenerateTestData {
190 DegenerateTestData() { fStage = kInitial; }
191 bool isDegenerate() const { return kNonDegenerate != fStage; }
192 enum {
193 kInitial,
194 kPoint,
195 kLine,
196 kNonDegenerate
197 } fStage;
198 SkPoint fFirstPoint;
199 SkVector fLineNormal;
200 SkScalar fLineC;
201};
202
203static const SkScalar kClose = (SK_Scalar1 / 16);
204static const SkScalar kCloseSqd = kClose * kClose;
205
206void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
207 switch (data->fStage) {
208 case DegenerateTestData::kInitial:
209 data->fFirstPoint = pt;
210 data->fStage = DegenerateTestData::kPoint;
211 break;
212 case DegenerateTestData::kPoint:
213 if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) {
214 data->fLineNormal = pt - data->fFirstPoint;
215 data->fLineNormal.normalize();
216 data->fLineNormal = SkPointPriv::MakeOrthog(data->fLineNormal);
217 data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
218 data->fStage = DegenerateTestData::kLine;
219 }
220 break;
221 case DegenerateTestData::kLine:
222 if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
223 data->fStage = DegenerateTestData::kNonDegenerate;
224 }
225 break;
226 case DegenerateTestData::kNonDegenerate:
227 break;
228 default:
229 SK_ABORT("Unexpected degenerate test stage.");
230 }
231}
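For reference, the line stage of the state machine above reduces to a signed point-to-line distance: with a unit normal n to the line through the first two well-separated points and c = -dot(n, p0), the quantity dot(n, p) + c measures how far p is from that line. A minimal sketch with a stand-in Vec type (not a Skia API):

#include <cmath>

struct Vec { float x, y; };

// Signed distance from p to the infinite line through p0 and p1.
static float signed_dist_to_line(Vec p0, Vec p1, Vec p) {
    float dx = p1.x - p0.x, dy = p1.y - p0.y;
    float len = std::sqrt(dx * dx + dy * dy);
    Vec n = {-dy / len, dx / len};              // one of the two unit normals
    float c = -(n.x * p0.x + n.y * p0.y);
    return n.x * p.x + n.y * p.y + c;           // path stays "degenerate" while |result| <= kClose
}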
232
233inline bool get_direction(const SkPath& path, const SkMatrix& m, SkPathFirstDirection* dir) {
234 // At this point, we've already returned true from canDraw(), which checked that the path's
235 // direction could be determined, so this should just be fetching the cached direction.
236 // However, if perspective is involved, we're operating on a transformed path, which may no
237 // longer have a computable direction.
238 *dir = SkPathPriv::ComputeFirstDirection(path);
239 if (*dir == SkPathFirstDirection::kUnknown) {
240 return false;
241 }
242
243 // check whether m reverses the orientation
244 SkASSERT(!m.hasPerspective());
245 SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
246 m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY);
247 if (det2x2 < 0) {
248 *dir = SkPathPriv::OppositeFirstDirection(*dir);
249 }
250
251 return true;
252}
253
254inline void add_line_to_segment(const SkPoint& pt, SegmentArray* segments) {
255 segments->push_back();
256 segments->back().fType = Segment::kLine;
257 segments->back().fPts[0] = pt;
258}
259
260inline void add_quad_segment(const SkPoint pts[3], SegmentArray* segments) {
261 if (SkPointPriv::DistanceToLineSegmentBetweenSqd(pts[1], pts[0], pts[2]) < kCloseSqd) {
262 if (pts[0] != pts[2]) {
263 add_line_to_segment(pts[2], segments);
264 }
265 } else {
266 segments->push_back();
267 segments->back().fType = Segment::kQuad;
268 segments->back().fPts[0] = pts[1];
269 segments->back().fPts[1] = pts[2];
270 }
271}
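The flatness test above treats a quad as a line when its control point lies within kClose (1/16) of the chord between its endpoints. A small sketch of that criterion with a stand-in point type, mirroring what SkPointPriv::DistanceToLineSegmentBetweenSqd computes (the helper names are illustrative):

struct P2 { float x, y; };

static float dist_to_segment_sqd(P2 p, P2 a, P2 b) {
    float vx = b.x - a.x, vy = b.y - a.y;
    float wx = p.x - a.x, wy = p.y - a.y;
    float lenSqd = vx * vx + vy * vy;
    float t = lenSqd > 0 ? (wx * vx + wy * vy) / lenSqd : 0;
    t = t < 0 ? 0 : (t > 1 ? 1 : t);            // clamp the projection onto the segment
    float dx = wx - t * vx, dy = wy - t * vy;
    return dx * dx + dy * dy;
}

static bool quad_is_nearly_a_line(const P2 q[3]) {
    const float kClose = 1.0f / 16;
    return dist_to_segment_sqd(q[1], q[0], q[2]) < kClose * kClose;
}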
272
273inline void add_cubic_segments(const SkPoint pts[4],
274 SkPathFirstDirection dir,
275 SegmentArray* segments) {
276 STArray<15, SkPoint, true> quads;
277 GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
278 int count = quads.size();
279 for (int q = 0; q < count; q += 3) {
280 add_quad_segment(&quads[q], segments);
281 }
282}
283
284bool get_segments(const SkPath& path,
285 const SkMatrix& m,
286 SegmentArray* segments,
287 SkPoint* fanPt,
288 int* vCount,
289 int* iCount) {
290 SkPath::Iter iter(path, true);
291 // This renderer over-emphasizes very thin path regions. We use the distance
292 // to the path from the sample to compute coverage. Every pixel intersected
293 // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
294 // notice that the sample may be close to a very thin area of the path and
295 // thus should be very light. This is particularly egregious for degenerate
296 // line paths. We detect paths that are very close to a line (zero area) and
297 // draw nothing.
298 DegenerateTestData degenerateData;
299 SkPathFirstDirection dir;
300 if (!get_direction(path, m, &dir)) {
301 return false;
302 }
303
304 for (;;) {
305 SkPoint pts[4];
306 SkPath::Verb verb = iter.next(pts);
307 switch (verb) {
308 case SkPath::kMove_Verb:
309 m.mapPoints(pts, 1);
310 update_degenerate_test(&degenerateData, pts[0]);
311 break;
312 case SkPath::kLine_Verb: {
313 if (!SkPathPriv::AllPointsEq(pts, 2)) {
314 m.mapPoints(&pts[1], 1);
315 update_degenerate_test(&degenerateData, pts[1]);
316 add_line_to_segment(pts[1], segments);
317 }
318 break;
319 }
320 case SkPath::kQuad_Verb:
321 if (!SkPathPriv::AllPointsEq(pts, 3)) {
322 m.mapPoints(pts, 3);
323 update_degenerate_test(&degenerateData, pts[1]);
324 update_degenerate_test(&degenerateData, pts[2]);
325 add_quad_segment(pts, segments);
326 }
327 break;
328 case SkPath::kConic_Verb: {
329 if (!SkPathPriv::AllPointsEq(pts, 3)) {
330 m.mapPoints(pts, 3);
331 SkScalar weight = iter.conicWeight();
332 SkAutoConicToQuads converter;
333 const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
334 for (int i = 0; i < converter.countQuads(); ++i) {
335 update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
336 update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
337 add_quad_segment(quadPts + 2*i, segments);
338 }
339 }
340 break;
341 }
342 case SkPath::kCubic_Verb: {
343 if (!SkPathPriv::AllPointsEq(pts, 4)) {
344 m.mapPoints(pts, 4);
345 update_degenerate_test(&degenerateData, pts[1]);
346 update_degenerate_test(&degenerateData, pts[2]);
347 update_degenerate_test(&degenerateData, pts[3]);
348 add_cubic_segments(pts, dir, segments);
349 }
350 break;
351 }
352 case SkPath::kDone_Verb:
353 if (degenerateData.isDegenerate()) {
354 return false;
355 } else {
356 return compute_vectors(segments, fanPt, dir, vCount, iCount);
357 }
358 default:
359 break;
360 }
361 }
362}
363
364struct Draw {
365 Draw() : fVertexCnt(0), fIndexCnt(0) {}
366 int fVertexCnt;
367 int fIndexCnt;
368};
369
370typedef TArray<Draw, true> DrawArray;
371
372void create_vertices(const SegmentArray& segments,
373 const SkPoint& fanPt,
374 const VertexColor& color,
375 DrawArray* draws,
376 VertexWriter& verts,
377 uint16_t* idxs,
378 size_t vertexStride) {
379 Draw* draw = &draws->push_back();
380 // alias just to make vert/index assignments easier to read.
381 int* v = &draw->fVertexCnt;
382 int* i = &draw->fIndexCnt;
383
384 int count = segments.size();
385 for (int a = 0; a < count; ++a) {
386 const Segment& sega = segments[a];
387 int b = (a + 1) % count;
388 const Segment& segb = segments[b];
389
390 // Check whether adding the verts for this segment to the current draw would cause index
391 // values to overflow.
392 int vCount = 4;
393 if (Segment::kLine == segb.fType) {
394 vCount += 5;
395 } else {
396 vCount += 6;
397 }
398 if (draw->fVertexCnt + vCount > (1 << 16)) {
399 idxs += *i;
400 draw = &draws->push_back();
401 v = &draw->fVertexCnt;
402 i = &draw->fIndexCnt;
403 }
404
405 const SkScalar negOneDists[2] = { -SK_Scalar1, -SK_Scalar1 };
406
407 // FIXME: These tris are inset in the 1 unit arc around the corner
408 SkPoint p0 = sega.endPt();
409 // Position, Color, UV, D0, D1
410 verts << p0 << color << SkPoint{0, 0} << negOneDists;
411 verts << (p0 + sega.endNorm()) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
412 verts << (p0 + segb.fMid) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
413 verts << (p0 + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
414
415 idxs[*i + 0] = *v + 0;
416 idxs[*i + 1] = *v + 2;
417 idxs[*i + 2] = *v + 1;
418 idxs[*i + 3] = *v + 0;
419 idxs[*i + 4] = *v + 3;
420 idxs[*i + 5] = *v + 2;
421
422 *v += 4;
423 *i += 6;
424
425 if (Segment::kLine == segb.fType) {
426 // we draw the line edge as a degenerate quad (u is 0, v is the
427 // signed distance to the edge)
428 SkPoint v1Pos = sega.endPt();
429 SkPoint v2Pos = segb.fPts[0];
430 SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, v1Pos, v2Pos);
431
432 verts << fanPt << color << SkPoint{0, dist} << negOneDists;
433 verts << v1Pos << color << SkPoint{0, 0} << negOneDists;
434 verts << v2Pos << color << SkPoint{0, 0} << negOneDists;
435 verts << (v1Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
436 verts << (v2Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
437
438 idxs[*i + 0] = *v + 3;
439 idxs[*i + 1] = *v + 1;
440 idxs[*i + 2] = *v + 2;
441
442 idxs[*i + 3] = *v + 4;
443 idxs[*i + 4] = *v + 3;
444 idxs[*i + 5] = *v + 2;
445
446 *i += 6;
447
448 // Draw the interior fan if it exists.
449 // TODO: Detect and combine colinear segments. This will ensure we catch every case
450 // with no interior, and that the resulting shared edge uses the same endpoints.
451 if (count >= 3) {
452 idxs[*i + 0] = *v + 0;
453 idxs[*i + 1] = *v + 2;
454 idxs[*i + 2] = *v + 1;
455
456 *i += 3;
457 }
458
459 *v += 5;
460 } else {
461 SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};
462
463 SkScalar c0 = segb.fNorms[0].dot(qpts[0]);
464 SkScalar c1 = segb.fNorms[1].dot(qpts[2]);
465
466 // We must transform the positions into UV space in CPU memory and then copy them to the
467 // GPU buffer. If we wrote the positions into the GPU buffer first and then calculated the
468 // UVs, we would end up reading back from the GPU buffer, which can be very slow.
469 struct PosAndUV {
470 SkPoint fPos;
471 SkPoint fUV;
472 };
473 PosAndUV posAndUVPoints[6];
474 posAndUVPoints[0].fPos = fanPt;
475 posAndUVPoints[1].fPos = qpts[0];
476 posAndUVPoints[2].fPos = qpts[2];
477 posAndUVPoints[3].fPos = qpts[0] + segb.fNorms[0];
478 posAndUVPoints[4].fPos = qpts[2] + segb.fNorms[1];
479 SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
480 midVec.normalize();
481 posAndUVPoints[5].fPos = qpts[1] + midVec;
482
483 GrPathUtils::QuadUVMatrix toUV(qpts);
484 toUV.apply(posAndUVPoints, 6, sizeof(PosAndUV), sizeof(SkPoint));
485
486 verts << posAndUVPoints[0].fPos << color << posAndUVPoints[0].fUV
487 << (-segb.fNorms[0].dot(fanPt) + c0)
488 << (-segb.fNorms[1].dot(fanPt) + c1);
489
490 verts << posAndUVPoints[1].fPos << color << posAndUVPoints[1].fUV
491 << 0.0f
492 << (-segb.fNorms[1].dot(qpts[0]) + c1);
493
494 verts << posAndUVPoints[2].fPos << color << posAndUVPoints[2].fUV
495 << (-segb.fNorms[0].dot(qpts[2]) + c0)
496 << 0.0f;
497 // We need a negative value large enough that it won't affect results when it is
498 // interpolated with. However, it can't be so large in magnitude that it hurts
499 // numerical precision on less powerful GPUs.
500 static const SkScalar kStableLargeNegativeValue = -SK_ScalarMax/1000000;
501 verts << posAndUVPoints[3].fPos << color << posAndUVPoints[3].fUV
502 << kStableLargeNegativeValue
503 << kStableLargeNegativeValue;
504
505 verts << posAndUVPoints[4].fPos << color << posAndUVPoints[4].fUV
506 << kStableLargeNegativeValue
507 << kStableLargeNegativeValue;
508
509 verts << posAndUVPoints[5].fPos << color << posAndUVPoints[5].fUV
510 << kStableLargeNegativeValue
511 << kStableLargeNegativeValue;
512
513 idxs[*i + 0] = *v + 3;
514 idxs[*i + 1] = *v + 1;
515 idxs[*i + 2] = *v + 2;
516 idxs[*i + 3] = *v + 4;
517 idxs[*i + 4] = *v + 3;
518 idxs[*i + 5] = *v + 2;
519
520 idxs[*i + 6] = *v + 5;
521 idxs[*i + 7] = *v + 3;
522 idxs[*i + 8] = *v + 4;
523
524 *i += 9;
525
526 // Draw the interior fan if it exists.
527 // TODO: Detect and combine colinear segments. This will ensure we catch every case
528 // with no interior, and that the resulting shared edge uses the same endpoints.
529 if (count >= 3) {
530 idxs[*i + 0] = *v + 0;
531 idxs[*i + 1] = *v + 2;
532 idxs[*i + 2] = *v + 1;
533
534 *i += 3;
535 }
536
537 *v += 6;
538 }
539 }
540}
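The VertexWriter stream above implies a per-vertex layout of position, color, and a float4 of quad-edge data (the UV pair plus the two distances labeled D0/D1), which lines up with the three attributes QuadEdgeEffect declares below. A sketch of the equivalent struct, assuming the non-wide-color case with a packed 4-byte color (the wide-color layout is larger); the struct name is hypothetical:

#include <cstdint>

struct AAConvexVertex {                // hypothetical mirror of the written layout
    float    fPos[2];                  // inPosition
    uint32_t fColor;                   // inColor, packed 4-byte color (non-wide case)
    float    fUV[2];                   // canonical quad coords; v doubles as edge distance for lines
    float    fD0, fD1;                 // distances used to trim the infinite quad
};
static_assert(sizeof(AAConvexVertex) == 28, "expected a 28-byte stride in the non-wide-color case");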
541
542///////////////////////////////////////////////////////////////////////////////
543
544/*
545 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
546 * two components of the vertex attribute. Coverage is based on signed
547 * distance with negative being inside, positive outside. The edge is specified in
548 * window space (y-down). If either the third or fourth component of the interpolated
549 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
550 * attempt to trim to a portion of the infinite quad.
551 * Requires shader derivative instruction support.
552 */
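A CPU-side sketch of the coverage math this comment describes and the fragment shader below implements: evaluate f(u,v) = u^2 - v, approximate the distance to the curve as f divided by the length of its screen-space gradient (the shader gets the derivatives from dFdx/dFdy), and ramp coverage over roughly one pixel. The helper is illustrative only; the derivative arguments stand in for what the GPU supplies.

#include <algorithm>
#include <cmath>

static float quad_edge_coverage(float u, float v,
                                float dudx, float dvdx,   // d(uv)/dx at this pixel
                                float dudy, float dvdy) { // d(uv)/dy at this pixel
    float f  = u * u - v;                                 // implicit function of the quad
    float gx = 2 * u * dudx - dvdx;                       // df/dx
    float gy = 2 * u * dudy - dvdy;                       // df/dy
    float dist = f / std::sqrt(gx * gx + gy * gy);        // approximate signed distance
    return std::clamp(0.5f - dist, 0.0f, 1.0f);           // negative (inside) -> full coverage
}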
553
554class QuadEdgeEffect : public GrGeometryProcessor {
555public:
556 static GrGeometryProcessor* Make(SkArenaAlloc* arena,
557 const SkMatrix& localMatrix,
558 bool usesLocalCoords,
559 bool wideColor) {
560 return arena->make([&](void* ptr) {
561 return new (ptr) QuadEdgeEffect(localMatrix, usesLocalCoords, wideColor);
562 });
563 }
564
565 ~QuadEdgeEffect() override {}
566
567 const char* name() const override { return "QuadEdge"; }
568
569 void addToKey(const GrShaderCaps& caps, KeyBuilder* b) const override {
570 b->addBool(fUsesLocalCoords, "usesLocalCoords");
571 b->addBits(ProgramImpl::kMatrixKeyBits,
572 ProgramImpl::ComputeMatrixKey(caps, fLocalMatrix),
573 "localMatrixType");
574 }
575
576 std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const override;
577
578private:
579 QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords, bool wideColor)
580 : INHERITED(kQuadEdgeEffect_ClassID)
581 , fLocalMatrix(localMatrix)
582 , fUsesLocalCoords(usesLocalCoords) {
583 fInPosition = {"inPosition", kFloat2_GrVertexAttribType, SkSLType::kFloat2};
584 fInColor = MakeColorAttribute("inColor", wideColor);
585 // GL on iOS 14 needs more precision for the quadedge attributes
586 fInQuadEdge = {"inQuadEdge", kFloat4_GrVertexAttribType, SkSLType::kFloat4};
587 this->setVertexAttributesWithImplicitOffsets(&fInPosition, 3);
588 }
589
590 Attribute fInPosition;
591 Attribute fInColor;
592 Attribute fInQuadEdge;
593
594 SkMatrix fLocalMatrix;
595 bool fUsesLocalCoords;
596
597 GR_DECLARE_GEOMETRY_PROCESSOR_TEST
598
599 using INHERITED = GrGeometryProcessor;
600};
601
602std::unique_ptr<GrGeometryProcessor::ProgramImpl> QuadEdgeEffect::makeProgramImpl(
603 const GrShaderCaps&) const {
604 class Impl : public ProgramImpl {
605 public:
606 void setData(const GrGLSLProgramDataManager& pdman,
607 const GrShaderCaps& shaderCaps,
608 const GrGeometryProcessor& geomProc) override {
609 const QuadEdgeEffect& qe = geomProc.cast<QuadEdgeEffect>();
610 SetTransform(pdman, shaderCaps, fLocalMatrixUniform, qe.fLocalMatrix, &fLocalMatrix);
611 }
612
613 private:
614 void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
615 const QuadEdgeEffect& qe = args.fGeomProc.cast<QuadEdgeEffect>();
616 GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
617 GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
618 GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
619 GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
620
621 // emit attributes
622 varyingHandler->emitAttributes(qe);
623
624 // GL on iOS 14 needs more precision for the quadedge attributes
625 // We might as well enable it everywhere
626 GrGLSLVarying v(SkSLType::kFloat4);
627 varyingHandler->addVarying("QuadEdge", &v);
628 vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge.name());
629
630 // Setup pass through color
631 fragBuilder->codeAppendf("half4 %s;", args.fOutputColor);
632 varyingHandler->addPassThroughAttribute(qe.fInColor.asShaderVar(), args.fOutputColor);
633
634 // Setup position
635 WriteOutputPosition(vertBuilder, gpArgs, qe.fInPosition.name());
636 if (qe.fUsesLocalCoords) {
637 WriteLocalCoord(vertBuilder,
638 uniformHandler,
639 *args.fShaderCaps,
640 gpArgs,
641 qe.fInPosition.asShaderVar(),
642 qe.fLocalMatrix,
643 &fLocalMatrixUniform);
644 }
645
646 fragBuilder->codeAppendf("half edgeAlpha;");
647
648 // keep the derivative instructions outside the conditional
649 fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
650 fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
651 fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
652 // today we know z and w are in device space. We could use derivatives
653 fragBuilder->codeAppendf("edgeAlpha = half(min(min(%s.z, %s.w) + 0.5, 1.0));", v.fsIn(),
654 v.fsIn());
655 fragBuilder->codeAppendf ("} else {");
656 fragBuilder->codeAppendf("half2 gF = half2(half(2.0*%s.x*duvdx.x - duvdx.y),"
657 " half(2.0*%s.x*duvdy.x - duvdy.y));",
658 v.fsIn(), v.fsIn());
659 fragBuilder->codeAppendf("edgeAlpha = half(%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
660 v.fsIn());
661 fragBuilder->codeAppendf("edgeAlpha = "
662 "saturate(0.5 - edgeAlpha / length(gF));}");
663
664 fragBuilder->codeAppendf("half4 %s = half4(edgeAlpha);", args.fOutputCoverage);
665 }
666
667 private:
668 SkMatrix fLocalMatrix = SkMatrix::InvalidMatrix();
669
670 UniformHandle fLocalMatrixUniform;
671 };
672
673 return std::make_unique<Impl>();
674}
675
676GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect)
677
678#if defined(GR_TEST_UTILS)
679GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
680 SkMatrix localMatrix = GrTest::TestMatrix(d->fRandom);
681 bool usesLocalCoords = d->fRandom->nextBool();
682 bool wideColor = d->fRandom->nextBool();
683 // Doesn't work without derivative instructions.
684 return d->caps()->shaderCaps()->fShaderDerivativeSupport
685 ? QuadEdgeEffect::Make(d->allocator(), localMatrix, usesLocalCoords, wideColor)
686 : nullptr;
687}
688#endif
689
690class AAConvexPathOp final : public GrMeshDrawOp {
691private:
692 using Helper = GrSimpleMeshDrawOpHelperWithStencil;
693
694public:
695 DEFINE_OP_CLASS_ID
696
697 static GrOp::Owner Make(GrRecordingContext* context,
698 GrPaint&& paint,
699 const SkMatrix& viewMatrix,
700 const SkPath& path,
701 const GrUserStencilSettings* stencilSettings) {
702 return Helper::FactoryHelper<AAConvexPathOp>(context, std::move(paint), viewMatrix, path,
703 stencilSettings);
704 }
705
706 AAConvexPathOp(GrProcessorSet* processorSet, const SkPMColor4f& color,
707 const SkMatrix& viewMatrix, const SkPath& path,
708 const GrUserStencilSettings* stencilSettings)
709 : INHERITED(ClassID()), fHelper(processorSet, GrAAType::kCoverage, stencilSettings) {
710 fPaths.emplace_back(PathData{viewMatrix, path, color});
711 this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
712 IsHairline::kNo);
713 }
714
715 const char* name() const override { return "AAConvexPathOp"; }
716
717 void visitProxies(const GrVisitProxyFunc& func) const override {
718 if (fProgramInfo) {
719 fProgramInfo->visitFPProxies(func);
720 } else {
721 fHelper.visitProxies(func);
722 }
723 }
724
725 FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
726
727 GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip* clip,
728 GrClampType clampType) override {
729 return fHelper.finalizeProcessors(
730 caps, clip, clampType, GrProcessorAnalysisCoverage::kSingleChannel,
731 &fPaths.back().fColor, &fWideColor);
732 }
733
734private:
735 GrProgramInfo* programInfo() override { return fProgramInfo; }
736
737 void onCreateProgramInfo(const GrCaps* caps,
738 SkArenaAlloc* arena,
739 const GrSurfaceProxyView& writeView,
740 bool usesMSAASurface,
741 GrAppliedClip&& appliedClip,
742 const GrDstProxyView& dstProxyView,
743 GrXferBarrierFlags renderPassXferBarriers,
744 GrLoadOp colorLoadOp) override {
745 SkMatrix invert = SkMatrix::I();
746 if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
747 return;
748 }
749
750 GrGeometryProcessor* quadProcessor = QuadEdgeEffect::Make(arena, invert,
751 fHelper.usesLocalCoords(),
752 fWideColor);
753
754 fProgramInfo = fHelper.createProgramInfoWithStencil(caps, arena, writeView, usesMSAASurface,
755 std::move(appliedClip),
756 dstProxyView, quadProcessor,
757 GrPrimitiveType::kTriangles,
758 renderPassXferBarriers, colorLoadOp);
759 }
760
761 void onPrepareDraws(GrMeshDrawTarget* target) override {
762 int instanceCount = fPaths.size();
763
764 if (!fProgramInfo) {
765 this->createProgramInfo(target);
766 if (!fProgramInfo) {
767 return;
768 }
769 }
770
771 const size_t kVertexStride = fProgramInfo->geomProc().vertexStride();
772
773 fDraws.reserve(instanceCount);
774
775 // TODO generate all segments for all paths and use one vertex buffer
776 for (int i = 0; i < instanceCount; i++) {
777 const PathData& args = fPaths[i];
778
779 // We rely on SkPath::transform to subdivide the path when the matrix has
780 // perspective. Otherwise, we apply the view matrix when copying to the
781 // segment representation.
782 const SkMatrix* viewMatrix = &args.fViewMatrix;
783
784 // We avoid initializing the path unless we have to
785 const SkPath* pathPtr = &args.fPath;
786 SkTLazy<SkPath> tmpPath;
787 if (viewMatrix->hasPerspective()) {
788 SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
789 tmpPathPtr->setIsVolatile(true);
790 tmpPathPtr->transform(*viewMatrix);
791 viewMatrix = &SkMatrix::I();
792 pathPtr = tmpPathPtr;
793 }
794
795 int vertexCount;
796 int indexCount;
797 enum {
798 kPreallocSegmentCnt = 512 / sizeof(Segment),
799 kPreallocDrawCnt = 4,
800 };
801 STArray<kPreallocSegmentCnt, Segment, true> segments;
802 SkPoint fanPt;
803
804 if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
805 &indexCount)) {
806 continue;
807 }
808
809 sk_sp<const GrBuffer> vertexBuffer;
810 int firstVertex;
811
812 VertexWriter verts = target->makeVertexWriter(kVertexStride,
813 vertexCount,
814 &vertexBuffer,
815 &firstVertex);
816
817 if (!verts) {
818 SkDebugf("Could not allocate vertices\n");
819 return;
820 }
821
822 sk_sp<const GrBuffer> indexBuffer;
823 int firstIndex;
824
825 uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
826 if (!idxs) {
827 SkDebugf("Could not allocate indices\n");
828 return;
829 }
830
831 STArray<kPreallocDrawCnt, Draw, true> draws;
832 VertexColor color(args.fColor, fWideColor);
833 create_vertices(segments, fanPt, color, &draws, verts, idxs, kVertexStride);
834
835 GrSimpleMesh* meshes = target->allocMeshes(draws.size());
836 for (int j = 0; j < draws.size(); ++j) {
837 const Draw& draw = draws[j];
838 meshes[j].setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0,
839 draw.fVertexCnt - 1, GrPrimitiveRestart::kNo, vertexBuffer,
840 firstVertex);
841 firstIndex += draw.fIndexCnt;
842 firstVertex += draw.fVertexCnt;
843 }
844
845 fDraws.push_back({ meshes, draws.size() });
846 }
847 }
848
849 void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
850 if (!fProgramInfo || fDraws.empty()) {
851 return;
852 }
853
854 flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
855 flushState->bindTextures(fProgramInfo->geomProc(), nullptr, fProgramInfo->pipeline());
856 for (int i = 0; i < fDraws.size(); ++i) {
857 for (int j = 0; j < fDraws[i].fMeshCount; ++j) {
858 flushState->drawMesh(fDraws[i].fMeshes[j]);
859 }
860 }
861 }
862
863 CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc*, const GrCaps& caps) override {
864 AAConvexPathOp* that = t->cast<AAConvexPathOp>();
865 if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
866 return CombineResult::kCannotCombine;
867 }
868 if (fHelper.usesLocalCoords() &&
869 !SkMatrixPriv::CheapEqual(fPaths[0].fViewMatrix, that->fPaths[0].fViewMatrix)) {
870 return CombineResult::kCannotCombine;
871 }
872
873 fPaths.push_back_n(that->fPaths.size(), that->fPaths.begin());
874 fWideColor |= that->fWideColor;
875 return CombineResult::kMerged;
876 }
877
878#if defined(GR_TEST_UTILS)
879 SkString onDumpInfo() const override {
880 return SkStringPrintf("Count: %d\n%s", fPaths.size(), fHelper.dumpInfo().c_str());
881 }
882#endif
883
884 struct PathData {
885 SkMatrix fViewMatrix;
886 SkPath fPath;
887 SkPMColor4f fColor;
888 };
889
890 Helper fHelper;
891 STArray<1, PathData, true> fPaths;
892 bool fWideColor;
893
894 struct MeshDraw {
895 GrSimpleMesh* fMeshes;
896 int fMeshCount;
897 };
898
899 SkTDArray<MeshDraw> fDraws;
900 GrProgramInfo* fProgramInfo = nullptr;
901
902 using INHERITED = GrMeshDrawOp;
903};
904
905} // anonymous namespace
906
907///////////////////////////////////////////////////////////////////////////////
908
909PathRenderer::CanDrawPath AAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
910 // This check requires convexity and known direction, since the direction is used to build
911 // the geometry segments. Degenerate convex paths will fall through to some other path renderer.
912 if (args.fCaps->shaderCaps()->fShaderDerivativeSupport &&
913 (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
914 !args.fShape->inverseFilled() && args.fShape->knownToBeConvex() &&
915 args.fShape->knownDirection()) {
916 return CanDrawPath::kYes;
917 }
918 return CanDrawPath::kNo;
919}
920
921bool AAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
922 GR_AUDIT_TRAIL_AUTO_FRAME(args.fContext->priv().auditTrail(),
923 "AAConvexPathRenderer::onDrawPath");
924 SkASSERT(args.fSurfaceDrawContext->numSamples() <= 1);
925 SkASSERT(!args.fShape->isEmpty());
926
927 SkPath path;
928 args.fShape->asPath(&path);
929
930 GrOp::Owner op = AAConvexPathOp::Make(args.fContext, std::move(args.fPaint),
931 *args.fViewMatrix,
932 path, args.fUserStencilSettings);
933 args.fSurfaceDrawContext->addDrawOp(args.fClip, std::move(op));
934 return true;
935}
936
937} // namespace skgpu::ganesh
938
939#if defined(GR_TEST_UTILS)
940
941GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
942 SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
943 const SkPath& path = GrTest::TestPathConvex(random);
944 const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
945 return skgpu::ganesh::AAConvexPathOp::Make(
946 context, std::move(paint), viewMatrix, path, stencilSettings);
947}
948
949#endif