dl_region.cc
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "flutter/display_list/geometry/dl_region.h"

#include "flutter/fml/logging.h"

namespace flutter {

// Threshold for switching from linear search through span lines to binary
// search.
const int kBinarySearchThreshold = 10;

DlRegion::SpanBuffer::SpanBuffer(DlRegion::SpanBuffer&& m)
    : capacity_(m.capacity_), size_(m.size_), spans_(m.spans_) {
  m.size_ = 0;
  m.capacity_ = 0;
  m.spans_ = nullptr;
};

DlRegion::SpanBuffer::SpanBuffer(const DlRegion::SpanBuffer& m)
    : capacity_(m.capacity_), size_(m.size_) {
  if (m.spans_ == nullptr) {
    spans_ = nullptr;
  } else {
    spans_ = static_cast<Span*>(std::malloc(capacity_ * sizeof(Span)));
    memcpy(spans_, m.spans_, size_ * sizeof(Span));
  }
};

DlRegion::SpanBuffer& DlRegion::SpanBuffer::operator=(
    const DlRegion::SpanBuffer& buffer) {
  SpanBuffer copy(buffer);
  std::swap(*this, copy);
  return *this;
}

DlRegion::SpanBuffer& DlRegion::SpanBuffer::operator=(
    DlRegion::SpanBuffer&& buffer) {
  std::swap(capacity_, buffer.capacity_);
  std::swap(size_, buffer.size_);
  std::swap(spans_, buffer.spans_);
  return *this;
}

DlRegion::SpanBuffer::~SpanBuffer() {
  free(spans_);
}

void DlRegion::SpanBuffer::reserve(size_t capacity) {
  if (capacity_ < capacity) {
    spans_ = static_cast<Span*>(std::realloc(spans_, capacity * sizeof(Span)));
    capacity_ = capacity;
  }
}

DlRegion::SpanChunkHandle DlRegion::SpanBuffer::storeChunk(const Span* begin,
                                                           const Span* end) {
  size_t chunk_size = end - begin;
  size_t min_capacity = size_ + chunk_size + 1;
  if (capacity_ < min_capacity) {
    size_t new_capacity = std::max(min_capacity, capacity_ * 2);
    new_capacity = std::max(new_capacity, size_t(512));
    reserve(new_capacity);
  }
  SpanChunkHandle res = size_;
  size_ += chunk_size + 1;
  setChunkSize(res, chunk_size);

  auto* dst = spans_ + res + 1;
  memmove(dst, begin, chunk_size * sizeof(Span));

  return res;
}

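// Chunk layout: storeChunk() reserves chunk_size + 1 consecutive Span slots.
// The Span at the handle itself serves as a header whose |left| field stores
// the number of spans in the chunk (see setChunkSize / getChunkSize below),
// and the chunk's spans follow immediately after it (see getSpans below).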
size_t DlRegion::SpanBuffer::getChunkSize(SpanChunkHandle handle) const {
  FML_DCHECK(handle < size_);
  return spans_[handle].left;
}

void DlRegion::SpanBuffer::setChunkSize(SpanChunkHandle handle, size_t size) {
  FML_DCHECK(handle < size_);
  FML_DCHECK(spans_ != nullptr);
  // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
  spans_[handle].left = size;
}

void DlRegion::SpanBuffer::getSpans(SpanChunkHandle handle,
                                    const DlRegion::Span*& begin,
                                    const DlRegion::Span*& end) const {
  FML_DCHECK(handle < size_);
  begin = spans_ + handle + 1;
  end = begin + getChunkSize(handle);
}

DlRegion::DlRegion(const std::vector<SkIRect>& rects) {
  setRects(rects);
}

DlRegion::DlRegion(const SkIRect& rect) : bounds_(rect) {
  Span span{rect.left(), rect.right()};
  lines_.push_back(makeLine(rect.top(), rect.bottom(), &span, &span + 1));
}

bool DlRegion::spansEqual(SpanLine& line,
                          const Span* begin,
                          const Span* end) const {
  const Span *our_begin, *our_end;
  span_buffer_.getSpans(line.chunk_handle, our_begin, our_end);
  size_t our_size = our_end - our_begin;
  size_t their_size = end - begin;
  if (our_size != their_size) {
    return false;
  }

  return memcmp(our_begin, begin, our_size * sizeof(Span)) == 0;
}

DlRegion::SpanLine DlRegion::makeLine(int32_t top,
                                      int32_t bottom,
                                      const SpanVec& v) {
  return makeLine(top, bottom, v.data(), v.data() + v.size());
}

DlRegion::SpanLine DlRegion::makeLine(int32_t top,
                                      int32_t bottom,
                                      const Span* begin,
                                      const Span* end) {
  auto handle = span_buffer_.storeChunk(begin, end);
  return {top, bottom, handle};
}

// Returns number of valid spans in res. For performance reasons res is never
// downsized.
size_t DlRegion::unionLineSpans(std::vector<Span>& res,
                                const SpanBuffer& a_buffer,
                                SpanChunkHandle a_handle,
                                const SpanBuffer& b_buffer,
                                SpanChunkHandle b_handle) {
  class OrderedSpanAccumulator {
   public:
    explicit OrderedSpanAccumulator(std::vector<Span>& res) : res(res) {}

    void accumulate(const Span& span) {
      if (span.left > last_ || len == 0) {
        res[len++] = span;
        last_ = span.right;
      } else if (span.right > last_) {
        FML_DCHECK(len > 0);
        res[len - 1].right = span.right;
        last_ = span.right;
      }
    }

    size_t len = 0;
    std::vector<Span>& res;

   private:
    int32_t last_ = std::numeric_limits<int32_t>::min();
  };

  const Span *begin1, *end1;
  a_buffer.getSpans(a_handle, begin1, end1);

  const Span *begin2, *end2;
  b_buffer.getSpans(b_handle, begin2, end2);

  size_t min_size = (end1 - begin1) + (end2 - begin2);
  if (res.size() < min_size) {
    res.resize(min_size);
  }

  OrderedSpanAccumulator accumulator(res);

  while (true) {
    if (begin1->left < begin2->left) {
      accumulator.accumulate(*begin1++);
      if (begin1 == end1) {
        break;
      }
    } else {
      // Either 2 is first, or they are equal, in which case add 2 now
      // and we might combine 1 with it next time around
      accumulator.accumulate(*begin2++);
      if (begin2 == end2) {
        break;
      }
    }
  }

  FML_DCHECK(begin1 == end1 || begin2 == end2);

  while (begin1 < end1) {
    accumulator.accumulate(*begin1++);
  }
  while (begin2 < end2) {
    accumulator.accumulate(*begin2++);
  }

  FML_DCHECK(begin1 == end1 && begin2 == end2);

  return accumulator.len;
}

size_t DlRegion::intersectLineSpans(std::vector<Span>& res,
                                    const SpanBuffer& a_buffer,
                                    SpanChunkHandle a_handle,
                                    const SpanBuffer& b_buffer,
                                    SpanChunkHandle b_handle) {
  const Span *begin1, *end1;
  a_buffer.getSpans(a_handle, begin1, end1);

  const Span *begin2, *end2;
  b_buffer.getSpans(b_handle, begin2, end2);

  // Worst case scenario, interleaved overlapping spans
  //  AAAA  BBBB  CCCC
  //    XXX  YYYY  XXXX
  size_t min_size = (end1 - begin1) + (end2 - begin2) - 1;
  if (res.size() < min_size) {
    res.resize(min_size);
  }

  // Pointer to the next span to be written.
  Span* new_span = res.data();

  while (begin1 != end1 && begin2 != end2) {
    if (begin1->right <= begin2->left) {
      ++begin1;
    } else if (begin2->right <= begin1->left) {
      ++begin2;
    } else {
      int32_t left = std::max(begin1->left, begin2->left);
      int32_t right = std::min(begin1->right, begin2->right);
      FML_DCHECK(left < right);
      FML_DCHECK(new_span < res.data() + res.size());
      *new_span++ = {left, right};
      if (begin1->right == right) {
        ++begin1;
      }
      if (begin2->right == right) {
        ++begin2;
      }
    }
  }

  return new_span - res.data();
}

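// setRects() builds the region with a sweep over y: the input rects are
// sorted by top (then left), an "active" list of rects covering the current
// y range is maintained, and for each vertical slice [cur_y, end_y) the
// active rects are flattened left to right into a single span line.
// Consecutive lines with identical spans are merged into one taller line.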
void DlRegion::setRects(const std::vector<SkIRect>& unsorted_rects) {
  // setRects can only be called on empty regions.
  FML_DCHECK(lines_.empty());

  size_t count = unsorted_rects.size();
  std::vector<const SkIRect*> rects(count);
  for (size_t i = 0; i < count; i++) {
    rects[i] = &unsorted_rects[i];
    bounds_.join(unsorted_rects[i]);
  }
  std::sort(rects.begin(), rects.end(), [](const SkIRect* a, const SkIRect* b) {
    if (a->top() < b->top()) {
      return true;
    }
    if (a->top() > b->top()) {
      return false;
    }
    return a->left() < b->left();
  });

  size_t active_end = 0;
  size_t next_rect = 0;
  int32_t cur_y = std::numeric_limits<int32_t>::min();
  SpanVec working_spans;

#ifdef DlRegion_DO_STATS
  size_t active_rect_count = 0;
  size_t span_count = 0;
  int pass_count = 0;
  int line_count = 0;
#endif

  while (next_rect < count || active_end > 0) {
    // First prune passed rects out of the active list
    size_t preserve_end = 0;
    for (size_t i = 0; i < active_end; i++) {
      const SkIRect* r = rects[i];
      if (r->bottom() > cur_y) {
        rects[preserve_end++] = r;
      }
    }
    active_end = preserve_end;

    // If we have no active rects any more, jump to the top of the
    // next available input rect.
    if (active_end == 0) {
      if (next_rect >= count) {
        // No active rects and no more rects to bring in. We are done.
        break;
      }
      cur_y = rects[next_rect]->top();
    }

    // Next, insert any new rects we've reached into the active list
    while (next_rect < count) {
      const SkIRect* r = rects[next_rect];
      if (r->isEmpty()) {
        // Skip empty rects, making sure we still advance past them so the
        // loop cannot stall on a degenerate input.
        next_rect++;
        continue;
      }
      if (r->top() > cur_y) {
        break;
      }
      // We now know that we will be inserting this rect into active list
      next_rect++;
      size_t insert_at = active_end++;
      while (insert_at > 0) {
        const SkIRect* ir = rects[insert_at - 1];
        if (ir->left() <= r->left()) {
          break;
        }
        rects[insert_at--] = ir;
      }
      rects[insert_at] = r;
    }

    // We either preserved some rects in the active list or added more from
    // the remaining input rects, or we would have exited the loop above.
    FML_DCHECK(active_end != 0);
    working_spans.clear();
    FML_DCHECK(working_spans.empty());

#ifdef DlRegion_DO_STATS
    active_rect_count += active_end;
    pass_count++;
#endif

    // [start_x, end_x) always represents a valid span to be inserted
    // [cur_y, end_y) is the intersecting range over which all spans are valid
    int32_t start_x = rects[0]->left();
    int32_t end_x = rects[0]->right();
    int32_t end_y = rects[0]->bottom();
    for (size_t i = 1; i < active_end; i++) {
      const SkIRect* r = rects[i];
      if (r->left() > end_x) {
        working_spans.emplace_back(start_x, end_x);
        start_x = r->left();
        end_x = r->right();
      } else if (end_x < r->right()) {
        end_x = r->right();
      }
      if (end_y > r->bottom()) {
        end_y = r->bottom();
      }
    }
    working_spans.emplace_back(start_x, end_x);

    // end_y must not pass by the top of the next input rect
    if (next_rect < count && end_y > rects[next_rect]->top()) {
      end_y = rects[next_rect]->top();
    }

    // If all of the rules above work out, we should never collapse the
    // current range of Y coordinates to empty
    FML_DCHECK(end_y > cur_y);

    if (!lines_.empty() && lines_.back().bottom == cur_y &&
        spansEqual(lines_.back(), working_spans.data(),
                   working_spans.data() + working_spans.size())) {
      lines_.back().bottom = end_y;
    } else {
#ifdef DlRegion_DO_STATS
      span_count += working_spans.size();
      line_count++;
#endif
      lines_.push_back(makeLine(cur_y, end_y, working_spans));
    }
    cur_y = end_y;
  }

#ifdef DlRegion_DO_STATS
  double span_avg = ((double)span_count) / line_count;
  double active_avg = ((double)active_rect_count) / pass_count;
  FML_LOG(ERROR) << lines_.size() << " lines for " << count
                 << " input rects, avg " << span_avg
                 << " spans per line and avg " << active_avg
                 << " active rects per loop";
#endif
}

void DlRegion::appendLine(int32_t top,
                          int32_t bottom,
                          const Span* begin,
                          const Span* end) {
  if (lines_.empty()) {
    lines_.push_back(makeLine(top, bottom, begin, end));
  } else {
    if (lines_.back().bottom == top && spansEqual(lines_.back(), begin, end)) {
      lines_.back().bottom = bottom;
    } else {
      lines_.push_back(makeLine(top, bottom, begin, end));
    }
  }
}

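// MakeUnion() walks both regions' span lines from top to bottom. Lines whose
// vertical ranges do not overlap are copied through unchanged; where they
// overlap only partially, the non-overlapping top portion is emitted first
// and the remainder is reconsidered on the next iteration; where the tops
// coincide, the overlapping slice gets the two span chunks merged via
// unionLineSpans().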
DlRegion DlRegion::MakeUnion(const DlRegion& a, const DlRegion& b) {
  if (a.isEmpty()) {
    return b;
  } else if (b.isEmpty()) {
    return a;
  } else if (a.isSimple() && a.bounds_.contains(b.bounds_)) {
    return a;
  } else if (b.isSimple() && b.bounds_.contains(a.bounds_)) {
    return b;
  }

  DlRegion res;
  res.bounds_ = a.bounds_;
  res.bounds_.join(b.bounds_);
  res.span_buffer_.reserve(a.span_buffer_.capacity() +
                           b.span_buffer_.capacity());

  auto& lines = res.lines_;
  lines.reserve(a.lines_.size() + b.lines_.size());

  auto a_it = a.lines_.begin();
  auto b_it = b.lines_.begin();
  auto a_end = a.lines_.end();
  auto b_end = b.lines_.end();

  FML_DCHECK(a_it != a_end && b_it != b_end);

  auto& a_buffer = a.span_buffer_;
  auto& b_buffer = b.span_buffer_;

  std::vector<Span> tmp;

  int32_t cur_top = std::numeric_limits<int32_t>::min();

  while (a_it != a_end && b_it != b_end) {
    auto a_top = std::max(cur_top, a_it->top);
    auto b_top = std::max(cur_top, b_it->top);
    if (a_it->bottom <= b_top) {
      res.appendLine(a_top, a_it->bottom, a_buffer, a_it->chunk_handle);
      ++a_it;
    } else if (b_it->bottom <= a_top) {
      res.appendLine(b_top, b_it->bottom, b_buffer, b_it->chunk_handle);
      ++b_it;
    } else {
      if (a_top < b_top) {
        res.appendLine(a_top, b_top, a_buffer, a_it->chunk_handle);
        cur_top = b_top;
        if (cur_top == a_it->bottom) {
          ++a_it;
        }
      } else if (b_top < a_top) {
        res.appendLine(b_top, a_top, b_buffer, b_it->chunk_handle);
        cur_top = a_top;
        if (cur_top == b_it->bottom) {
          ++b_it;
        }
      } else {
        auto new_bottom = std::min(a_it->bottom, b_it->bottom);
        FML_DCHECK(a_top == b_top);
        FML_DCHECK(new_bottom > a_top);
        FML_DCHECK(new_bottom > b_top);
        auto size = unionLineSpans(tmp, a_buffer, a_it->chunk_handle, b_buffer,
                                   b_it->chunk_handle);
        res.appendLine(a_top, new_bottom, tmp.data(), tmp.data() + size);
        cur_top = new_bottom;
        if (cur_top == a_it->bottom) {
          ++a_it;
        }
        if (cur_top == b_it->bottom) {
          ++b_it;
        }
      }
    }
  }

  FML_DCHECK(a_it == a_end || b_it == b_end);

  while (a_it != a_end) {
    auto a_top = std::max(cur_top, a_it->top);
    res.appendLine(a_top, a_it->bottom, a_buffer, a_it->chunk_handle);
    ++a_it;
  }

  while (b_it != b_end) {
    auto b_top = std::max(cur_top, b_it->top);
    res.appendLine(b_top, b_it->bottom, b_buffer, b_it->chunk_handle);
    ++b_it;
  }

  return res;
}

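// MakeIntersection() mirrors the walk in MakeUnion(), but emits output only
// where both the vertical ranges and the horizontal spans overlap, and it
// rebuilds the result bounds from the spans that are actually emitted.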
DlRegion DlRegion::MakeIntersection(const DlRegion& a, const DlRegion& b) {
  if (!SkIRect::Intersects(a.bounds_, b.bounds_)) {
    return DlRegion();
  } else if (a.isSimple() && b.isSimple()) {
    SkIRect r(a.bounds_);
    auto res = r.intersect(b.bounds_);
    (void)res;  // Suppress unused variable warning in release builds.
    FML_DCHECK(res);
    return DlRegion(r);
  } else if (a.isSimple() && a.bounds_.contains(b.bounds_)) {
    return b;
  } else if (b.isSimple() && b.bounds_.contains(a.bounds_)) {
    return a;
  }

  DlRegion res;
  res.span_buffer_.reserve(
      std::max(a.span_buffer_.capacity(), b.span_buffer_.capacity()));

  auto& lines = res.lines_;
  lines.reserve(std::min(a.lines_.size(), b.lines_.size()));

  std::vector<SpanLine>::const_iterator a_it, b_it;
  getIntersectionIterators(a.lines_, b.lines_, a_it, b_it);

  auto a_end = a.lines_.end();
  auto b_end = b.lines_.end();

  auto& a_buffer = a.span_buffer_;
  auto& b_buffer = b.span_buffer_;

  std::vector<Span> tmp;

  int32_t cur_top = std::numeric_limits<int32_t>::min();

  while (a_it != a_end && b_it != b_end) {
    auto a_top = std::max(cur_top, a_it->top);
    auto b_top = std::max(cur_top, b_it->top);
    if (a_it->bottom <= b_top) {
      ++a_it;
    } else if (b_it->bottom <= a_top) {
      ++b_it;
    } else {
      auto top = std::max(a_top, b_top);
      auto bottom = std::min(a_it->bottom, b_it->bottom);
      FML_DCHECK(top < bottom);
      auto size = intersectLineSpans(tmp, a_buffer, a_it->chunk_handle,
                                     b_buffer, b_it->chunk_handle);
      if (size > 0) {
        res.appendLine(top, bottom, tmp.data(), tmp.data() + size);
        res.bounds_.join(SkIRect::MakeLTRB(
            tmp.data()->left, top, (tmp.data() + size - 1)->right, bottom));
      }
      cur_top = bottom;
      if (cur_top == a_it->bottom) {
        ++a_it;
      }
      if (cur_top == b_it->bottom) {
        ++b_it;
      }
    }
  }
  FML_DCHECK(a_it == a_end || b_it == b_end);
  return res;
}

std::vector<SkIRect> DlRegion::getRects(bool deband) const {
  std::vector<SkIRect> rects;
  if (isEmpty()) {
    return rects;
  } else if (isSimple()) {
    rects.push_back(bounds_);
    return rects;
  }

  size_t rect_count = 0;
  size_t previous_span_end = 0;
  for (const auto& line : lines_) {
    rect_count += span_buffer_.getChunkSize(line.chunk_handle);
  }
  rects.reserve(rect_count);

  for (const auto& line : lines_) {
    const Span *span_begin, *span_end;
    span_buffer_.getSpans(line.chunk_handle, span_begin, span_end);
    for (const auto* span = span_begin; span < span_end; ++span) {
      SkIRect rect{span->left, line.top, span->right, line.bottom};
      if (deband) {
        auto iter = rects.begin() + previous_span_end;
        // If there is a rectangle earlier in rects of which this one is a
        // vertical continuation, remove the previous rectangle and expand
        // this one vertically to cover the area.
        while (iter != rects.begin()) {
          --iter;
          if (iter->bottom() < rect.top()) {
            // Went all the way to the previous span line.
            break;
          } else if (iter->left() == rect.left() &&
                     iter->right() == rect.right()) {
            FML_DCHECK(iter->bottom() == rect.top());
            rect.fTop = iter->fTop;
            rects.erase(iter);
            --previous_span_end;
            break;
          }
        }
      }
      rects.push_back(rect);
    }
    previous_span_end = rects.size();
  }
  return rects;
}

bool DlRegion::isComplex() const {
  return lines_.size() > 1 ||
         (lines_.size() == 1 &&
          span_buffer_.getChunkSize(lines_.front().chunk_handle) > 1);
}

bool DlRegion::intersects(const SkIRect& rect) const {
  if (isEmpty()) {
    return false;
  }

  auto bounds_intersect = SkIRect::Intersects(bounds_, rect);

  if (isSimple()) {
    return bounds_intersect;
  }

  if (!bounds_intersect) {
    return false;
  }

  auto it = lines_.begin();
  auto end = lines_.end();
  if (lines_.size() > kBinarySearchThreshold &&
      it[kBinarySearchThreshold].bottom <= rect.fTop) {
    it = std::lower_bound(
        lines_.begin() + kBinarySearchThreshold + 1, lines_.end(), rect.fTop,
        [](const SpanLine& line, int32_t top) { return line.bottom <= top; });
  } else {
    while (it != end && it->bottom <= rect.fTop) {
      ++it;
      continue;
    }
  }
  while (it != end && it->top < rect.fBottom) {
    FML_DCHECK(rect.fTop < it->bottom && it->top < rect.fBottom);
    const Span *begin, *end;
    span_buffer_.getSpans(it->chunk_handle, begin, end);
    while (begin != end && begin->left < rect.fRight) {
      if (begin->right > rect.fLeft) {
        return true;
      }
      ++begin;
    }
    ++it;
  }

  return false;
}

bool DlRegion::spansIntersect(const Span* begin1,
                              const Span* end1,
                              const Span* begin2,
                              const Span* end2) {
  while (begin1 != end1 && begin2 != end2) {
    if (begin1->right <= begin2->left) {
      ++begin1;
    } else if (begin2->right <= begin1->left) {
      ++begin2;
    } else {
      return true;
    }
  }
  return false;
}

void DlRegion::getIntersectionIterators(
    const std::vector<SpanLine>& a_lines,
    const std::vector<SpanLine>& b_lines,
    std::vector<SpanLine>::const_iterator& a_it,
    std::vector<SpanLine>::const_iterator& b_it) {
  a_it = a_lines.begin();
  auto a_end = a_lines.end();
  b_it = b_lines.begin();
  auto b_end = b_lines.end();

  FML_DCHECK(a_it != a_end && b_it != b_end);

  auto a_len = a_end - a_it;
  auto b_len = b_end - b_it;

  if (a_len > kBinarySearchThreshold &&
      a_it[kBinarySearchThreshold].bottom <= b_it->top) {
    a_it = std::lower_bound(
        a_lines.begin() + kBinarySearchThreshold + 1, a_lines.end(), b_it->top,
        [](const SpanLine& line, int32_t top) { return line.bottom <= top; });
  } else if (b_len > kBinarySearchThreshold &&
             b_it[kBinarySearchThreshold].bottom <= a_it->top) {
    b_it = std::lower_bound(
        b_lines.begin() + kBinarySearchThreshold + 1, b_lines.end(), a_it->top,
        [](const SpanLine& line, int32_t top) { return line.bottom <= top; });
  }
}

bool DlRegion::intersects(const DlRegion& region) const {
  if (isEmpty() || region.isEmpty()) {
    return false;
  }

  auto our_complex = isComplex();
  auto their_complex = region.isComplex();
  auto bounds_intersect = SkIRect::Intersects(bounds_, region.bounds_);

  if (!our_complex && !their_complex) {
    return bounds_intersect;
  }

  if (!bounds_intersect) {
    return false;
  }

  if (!our_complex) {
    return region.intersects(bounds_);
  }

  if (!their_complex) {
    return intersects(region.bounds_);
  }

  std::vector<SpanLine>::const_iterator ours, theirs;
  getIntersectionIterators(lines_, region.lines_, ours, theirs);
  auto ours_end = lines_.end();
  auto theirs_end = region.lines_.end();

  while (ours != ours_end && theirs != theirs_end) {
    if (ours->bottom <= theirs->top) {
      ++ours;
    } else if (theirs->bottom <= ours->top) {
      ++theirs;
    } else {
      FML_DCHECK(ours->top < theirs->bottom && theirs->top < ours->bottom);
      const Span *ours_begin, *ours_end;
      span_buffer_.getSpans(ours->chunk_handle, ours_begin, ours_end);
      const Span *theirs_begin, *theirs_end;
      region.span_buffer_.getSpans(theirs->chunk_handle, theirs_begin,
                                   theirs_end);
      if (spansIntersect(ours_begin, ours_end, theirs_begin, theirs_end)) {
        return true;
      }
      if (ours->bottom < theirs->bottom) {
        ++ours;
      } else {
        ++theirs;
      }
    }
  }
  return false;
}

}  // namespace flutter
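For reference, the public pieces defined above are typically driven as in the following minimal sketch. It is illustrative only and not part of dl_region.cc; it assumes the Flutter engine build environment, where dl_region.h makes SkIRect available (as the implementation above already relies on), and the function name RegionUsageSketch and all rectangle coordinates are made up for the example.

#include <vector>

#include "flutter/display_list/geometry/dl_region.h"

namespace {

void RegionUsageSketch() {
  // Build regions from axis-aligned integer rectangles. The vector
  // constructor runs setRects(); the single-rect constructor creates a
  // "simple" region.
  flutter::DlRegion a(std::vector<SkIRect>{SkIRect::MakeLTRB(0, 0, 100, 100),
                                           SkIRect::MakeLTRB(50, 50, 150, 150)});
  flutter::DlRegion b(SkIRect::MakeLTRB(75, 0, 200, 60));

  // Combine regions; both operations return a new DlRegion.
  flutter::DlRegion united = flutter::DlRegion::MakeUnion(a, b);
  flutter::DlRegion common = flutter::DlRegion::MakeIntersection(a, b);

  // Hit-test against a rectangle or another region.
  bool hits_rect = united.intersects(SkIRect::MakeLTRB(120, 10, 130, 20));
  bool hits_region = a.intersects(b);

  // Decompose back into non-overlapping rectangles. Passing true enables the
  // deband pass in getRects(), which merges vertically adjacent rectangles
  // that share the same horizontal extent.
  std::vector<SkIRect> rects = common.getRects(/*deband=*/true);

  (void)hits_rect;
  (void)hits_region;
  (void)rects;
}

}  // namespace

Note that setRects() expects to run on a freshly constructed region, per the FML_DCHECK(lines_.empty()) at the top of that method, so regions are built once and then combined rather than mutated in place.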