Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
GrGpu.cpp
Go to the documentation of this file.
1/*
2 * Copyright 2010 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
8
17#include "src/base/SkMathPriv.h"
33
34#include <algorithm>
35#include <utility>
36
37using namespace skia_private;
38
39////////////////////////////////////////////////////////////////////////////////
40
// Constructs a GrGpu bound to `direct` (the owning GrDirectContext). All
// backend state bits start dirty so the first use re-syncs the 3D API context.
GrGpu::GrGpu(GrDirectContext* direct) : fResetBits(kAll_GrBackendState), fContext(direct) {}
42
44 this->callSubmittedProcs(false);
45}
46
48 fCaps = std::move(caps);
49}
50
52
53////////////////////////////////////////////////////////////////////////////////
54
55static bool validate_texel_levels(SkISize dimensions, GrColorType texelColorType,
56 const GrMipLevel* texels, int mipLevelCount, const GrCaps* caps) {
57 SkASSERT(mipLevelCount > 0);
58 bool hasBasePixels = texels[0].fPixels;
59 int levelsWithPixelsCnt = 0;
60 auto bpp = GrColorTypeBytesPerPixel(texelColorType);
61 int w = dimensions.fWidth;
62 int h = dimensions.fHeight;
63 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
64 if (texels[currentMipLevel].fPixels) {
65 const size_t minRowBytes = w * bpp;
66 if (caps->writePixelsRowBytesSupport()) {
67 if (texels[currentMipLevel].fRowBytes < minRowBytes) {
68 return false;
69 }
70 if (texels[currentMipLevel].fRowBytes % bpp) {
71 return false;
72 }
73 } else {
74 if (texels[currentMipLevel].fRowBytes != minRowBytes) {
75 return false;
76 }
77 }
78 ++levelsWithPixelsCnt;
79 }
80 if (w == 1 && h == 1) {
81 if (currentMipLevel != mipLevelCount - 1) {
82 return false;
83 }
84 } else {
85 w = std::max(w / 2, 1);
86 h = std::max(h / 2, 1);
87 }
88 }
89 // Either just a base layer or a full stack is required.
90 if (mipLevelCount != 1 && (w != 1 || h != 1)) {
91 return false;
92 }
93 // Can specify just the base, all levels, or no levels.
94 if (!hasBasePixels) {
95 return levelsWithPixelsCnt == 0;
96 }
97 return levelsWithPixelsCnt == 1 || levelsWithPixelsCnt == mipLevelCount;
98}
99
// Shared back end for the uncompressed createTexture() entry points: validates
// the request against the caps, then dispatches to onCreateTexture().
// Returns nullptr for compressed formats (use createCompressedTexture), for
// parameter combinations the caps reject, or if the backend creation fails.
// `levelClearMask` bit i requests that mip level i be cleared on creation.
sk_sp<GrTexture> GrGpu::createTextureCommon(SkISize dimensions,
                                            const GrBackendFormat& format,
                                            GrTextureType textureType,
                                            GrRenderable renderable,
                                            int renderTargetSampleCnt,
                                            skgpu::Budgeted budgeted,
                                            GrProtected isProtected,
                                            int mipLevelCount,
                                            uint32_t levelClearMask,
                                            std::string_view label) {
    if (this->caps()->isFormatCompressed(format)) {
        // Call GrGpu::createCompressedTexture.
        return nullptr;
    }

    skgpu::Mipmapped mipmapped = mipLevelCount > 1 ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo;
    if (!this->caps()->validateSurfaceParams(dimensions,
                                             format,
                                             renderable,
                                             renderTargetSampleCnt,
                                             mipmapped,
                                             textureType)) {
        return nullptr;
    }

    if (renderable == GrRenderable::kYes) {
        // Round the requested sample count to one the backend supports for this format.
        renderTargetSampleCnt =
                this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, format);
    }
    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64);
    this->handleDirtyContext();
    auto tex = this->onCreateTexture(dimensions,
                                     format,
                                     renderable,
                                     renderTargetSampleCnt,
                                     budgeted,
                                     isProtected,
                                     mipLevelCount,
                                     levelClearMask,
                                     label);
    if (tex) {
        SkASSERT(tex->backendFormat() == format);
        SkASSERT(GrRenderable::kNo == renderable || tex->asRenderTarget());
        if (!this->caps()->reuseScratchTextures() && renderable == GrRenderable::kNo) {
            // Non-renderable scratch textures won't be reused, so drop the scratch key.
            tex->resourcePriv().removeScratchKey();
        }
        if (renderTargetSampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) {
            SkASSERT(GrRenderable::kYes == renderable);
            tex->asRenderTarget()->setRequiresManualMSAAResolve();
        }
    }
    return tex;
}
155
157 const GrBackendFormat& format,
158 GrTextureType textureType,
159 GrRenderable renderable,
160 int renderTargetSampleCnt,
161 skgpu::Mipmapped mipmapped,
162 skgpu::Budgeted budgeted,
163 GrProtected isProtected,
164 std::string_view label) {
165 int mipLevelCount = 1;
166 if (mipmapped == skgpu::Mipmapped::kYes) {
167 mipLevelCount =
168 32 - SkCLZ(static_cast<uint32_t>(std::max(dimensions.fWidth, dimensions.fHeight)));
169 }
170 uint32_t levelClearMask =
171 this->caps()->shouldInitializeTextures() ? (1 << mipLevelCount) - 1 : 0;
172 auto tex = this->createTextureCommon(dimensions,
173 format,
174 textureType,
175 renderable,
176 renderTargetSampleCnt,
177 budgeted,
178 isProtected,
179 mipLevelCount,
180 levelClearMask,
181 label);
182 if (tex && mipmapped == skgpu::Mipmapped::kYes && levelClearMask) {
183 tex->markMipmapsClean();
184 }
185
186 return tex;
187}
188
190 const GrBackendFormat& format,
191 GrTextureType textureType,
192 GrRenderable renderable,
193 int renderTargetSampleCnt,
194 skgpu::Budgeted budgeted,
195 GrProtected isProtected,
196 GrColorType textureColorType,
197 GrColorType srcColorType,
198 const GrMipLevel texels[],
199 int texelLevelCount,
200 std::string_view label) {
201 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
202 if (texelLevelCount) {
203 if (!validate_texel_levels(dimensions, srcColorType, texels, texelLevelCount,
204 this->caps())) {
205 return nullptr;
206 }
207 }
208
209 int mipLevelCount = std::max(1, texelLevelCount);
210 uint32_t levelClearMask = 0;
211 if (this->caps()->shouldInitializeTextures()) {
212 if (texelLevelCount) {
213 for (int i = 0; i < mipLevelCount; ++i) {
214 if (!texels->fPixels) {
215 levelClearMask |= static_cast<uint32_t>(1 << i);
216 }
217 }
218 } else {
219 levelClearMask = static_cast<uint32_t>((1 << mipLevelCount) - 1);
220 }
221 }
222
223 auto tex = this->createTextureCommon(dimensions,
224 format,
225 textureType,
226 renderable,
227 renderTargetSampleCnt,
228 budgeted,
229 isProtected,
230 texelLevelCount,
231 levelClearMask,
232 label);
233 if (tex) {
234 bool markMipLevelsClean = false;
235 // Currently if level 0 does not have pixels then no other level may, as enforced by
236 // validate_texel_levels.
237 if (texelLevelCount && texels[0].fPixels) {
238 if (!this->writePixels(tex.get(),
239 SkIRect::MakeSize(dimensions),
240 textureColorType,
241 srcColorType,
242 texels,
243 texelLevelCount)) {
244 return nullptr;
245 }
246 // Currently if level[1] of mip map has pixel data then so must all other levels.
247 // as enforced by validate_texel_levels.
248 markMipLevelsClean = (texelLevelCount > 1 && !levelClearMask && texels[1].fPixels);
250 } else if (levelClearMask && mipLevelCount > 1) {
251 markMipLevelsClean = true;
252 }
253 if (markMipLevelsClean) {
254 tex->markMipmapsClean();
255 }
256 }
257 return tex;
258}
259
261 const GrBackendFormat& format,
262 skgpu::Budgeted budgeted,
263 skgpu::Mipmapped mipmapped,
264 GrProtected isProtected,
265 const void* data,
266 size_t dataSize) {
267 this->handleDirtyContext();
268 if (dimensions.width() < 1 || dimensions.width() > this->caps()->maxTextureSize() ||
269 dimensions.height() < 1 || dimensions.height() > this->caps()->maxTextureSize()) {
270 return nullptr;
271 }
272 // Note if we relax the requirement that data must be provided then we must check
273 // caps()->shouldInitializeTextures() here.
274 if (!data) {
275 return nullptr;
276 }
277
278 // TODO: expand CompressedDataIsCorrect to work here too
280 if (compressionType == SkTextureCompressionType::kNone) {
281 return nullptr;
282 }
283
284 if (!this->caps()->isFormatTexturable(format, GrTextureType::k2D)) {
285 return nullptr;
286 }
287
288 if (dataSize <
290 compressionType, dimensions, nullptr, mipmapped == skgpu::Mipmapped::kYes)) {
291 return nullptr;
292 }
293 return this->onCreateCompressedTexture(dimensions, format, budgeted, mipmapped, isProtected,
294 data, dataSize);
295}
296
298 GrWrapOwnership ownership,
299 GrWrapCacheable cacheable,
300 GrIOType ioType) {
301 SkASSERT(ioType != kWrite_GrIOType);
302 this->handleDirtyContext();
303
304 const GrCaps* caps = this->caps();
305 SkASSERT(caps);
306
307 if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType())) {
308 return nullptr;
309 }
310 if (backendTex.width() > caps->maxTextureSize() ||
311 backendTex.height() > caps->maxTextureSize()) {
312 return nullptr;
313 }
314
315 return this->onWrapBackendTexture(backendTex, ownership, cacheable, ioType);
316}
317
319 GrWrapOwnership ownership,
320 GrWrapCacheable cacheable) {
321 this->handleDirtyContext();
322
323 const GrCaps* caps = this->caps();
324 SkASSERT(caps);
325
326 if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType())) {
327 return nullptr;
328 }
329 if (backendTex.width() > caps->maxTextureSize() ||
330 backendTex.height() > caps->maxTextureSize()) {
331 return nullptr;
332 }
333
334 return this->onWrapCompressedBackendTexture(backendTex, ownership, cacheable);
335}
336
338 int sampleCnt,
339 GrWrapOwnership ownership,
340 GrWrapCacheable cacheable) {
341 this->handleDirtyContext();
342 if (sampleCnt < 1) {
343 return nullptr;
344 }
345
346 const GrCaps* caps = this->caps();
347
348 if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType()) ||
349 !caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
350 return nullptr;
351 }
352
353 if (backendTex.width() > caps->maxRenderTargetSize() ||
354 backendTex.height() > caps->maxRenderTargetSize()) {
355 return nullptr;
356 }
357 sk_sp<GrTexture> tex =
358 this->onWrapRenderableBackendTexture(backendTex, sampleCnt, ownership, cacheable);
359 SkASSERT(!tex || tex->asRenderTarget());
360 if (tex && sampleCnt > 1 && !caps->msaaResolvesAutomatically()) {
361 tex->asRenderTarget()->setRequiresManualMSAAResolve();
362 }
363 return tex;
364}
365
367 this->handleDirtyContext();
368
369 const GrCaps* caps = this->caps();
370
371 if (!caps->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
372 return nullptr;
373 }
374
376 if (backendRT.isFramebufferOnly()) {
377 rt->setFramebufferOnly();
378 }
379 return rt;
380}
381
386
388 const GrVkDrawableInfo& vkInfo) {
389 // This is only supported on Vulkan so we default to returning nullptr here
390 return nullptr;
391}
392
394 GrGpuBufferType intendedType,
395 GrAccessPattern accessPattern) {
396 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
397 this->handleDirtyContext();
398 if ((intendedType == GrGpuBufferType::kXferCpuToGpu ||
399 intendedType == GrGpuBufferType::kXferGpuToCpu) &&
400 accessPattern == kStatic_GrAccessPattern) {
401 return nullptr;
402 }
403 sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern);
404 if (buffer && !this->caps()->reuseScratchBuffers()) {
405 buffer->resourcePriv().removeScratchKey();
406 }
407 return buffer;
408}
409
410bool GrGpu::copySurface(GrSurface* dst, const SkIRect& dstRect,
411 GrSurface* src, const SkIRect& srcRect,
412 GrSamplerState::Filter filter) {
413 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
414 SkASSERT(dst && src);
415 SkASSERT(!src->framebufferOnly());
416
417 if (dst->readOnly()) {
418 return false;
419 }
420
421 this->handleDirtyContext();
422
423 return this->onCopySurface(dst, dstRect, src, srcRect, filter);
424}
425
427 SkIRect rect,
428 GrColorType surfaceColorType,
429 GrColorType dstColorType,
430 void* buffer,
431 size_t rowBytes) {
432 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
434 SkASSERT(!surface->framebufferOnly());
435 SkASSERT(this->caps()->areColorTypeAndFormatCompatible(surfaceColorType,
436 surface->backendFormat()));
437
438 if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
439 return false;
440 }
441
442 size_t minRowBytes = SkToSizeT(GrColorTypeBytesPerPixel(dstColorType) * rect.width());
443 if (!this->caps()->readPixelsRowBytesSupport()) {
444 if (rowBytes != minRowBytes) {
445 return false;
446 }
447 } else {
448 if (rowBytes < minRowBytes) {
449 return false;
450 }
451 if (rowBytes % GrColorTypeBytesPerPixel(dstColorType)) {
452 return false;
453 }
454 }
455
456 this->handleDirtyContext();
457
458 return this->onReadPixels(surface, rect, surfaceColorType, dstColorType, buffer, rowBytes);
459}
460
462 SkIRect rect,
463 GrColorType surfaceColorType,
464 GrColorType srcColorType,
465 const GrMipLevel texels[],
466 int mipLevelCount,
467 bool prepForTexSampling) {
468 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
469 ATRACE_ANDROID_FRAMEWORK_ALWAYS("Texture upload(%u) %ix%i",
470 surface->uniqueID().asUInt(), rect.width(), rect.height());
472 SkASSERT(!surface->framebufferOnly());
473
474 if (surface->readOnly()) {
475 return false;
476 }
477
478 if (mipLevelCount == 0) {
479 return false;
480 } else if (mipLevelCount == 1) {
481 // We require that if we are not mipped, then the write region is contained in the surface
482 if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
483 return false;
484 }
485 } else if (rect != SkIRect::MakeSize(surface->dimensions())) {
486 // We require that if the texels are mipped, than the write region is the entire surface
487 return false;
488 }
489
490 if (!validate_texel_levels(rect.size(), srcColorType, texels, mipLevelCount, this->caps())) {
491 return false;
492 }
493
494 this->handleDirtyContext();
495 if (!this->onWritePixels(surface,
496 rect,
497 surfaceColorType,
498 srcColorType,
499 texels,
500 mipLevelCount,
501 prepForTexSampling)) {
502 return false;
503 }
504
505 this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount);
507
508 return true;
509}
510
512 size_t srcOffset,
514 size_t dstOffset,
515 size_t size) {
516 SkASSERT(src);
517 SkASSERT(dst);
518 SkASSERT(srcOffset % this->caps()->transferFromBufferToBufferAlignment() == 0);
519 SkASSERT(dstOffset % this->caps()->transferFromBufferToBufferAlignment() == 0);
520 SkASSERT(size % this->caps()->transferFromBufferToBufferAlignment() == 0);
521 SkASSERT(srcOffset + size <= src->size());
522 SkASSERT(dstOffset + size <= dst->size());
523 SkASSERT(src->intendedType() == GrGpuBufferType::kXferCpuToGpu);
524 SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);
525
526 this->handleDirtyContext();
527 if (!this->onTransferFromBufferToBuffer(std::move(src),
528 srcOffset,
529 std::move(dst),
530 dstOffset,
531 size)) {
532 return false;
533 }
534
536
537 return true;
538}
539
541 SkIRect rect,
542 GrColorType textureColorType,
543 GrColorType bufferColorType,
544 sk_sp<GrGpuBuffer> transferBuffer,
545 size_t offset,
546 size_t rowBytes) {
547 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
549 SkASSERT(transferBuffer);
550 SkASSERT(transferBuffer->intendedType() == GrGpuBufferType::kXferCpuToGpu);
551
552 if (texture->readOnly()) {
553 return false;
554 }
555
556 // We require that the write region is contained in the texture
557 if (!SkIRect::MakeSize(texture->dimensions()).contains(rect)) {
558 return false;
559 }
560
561 size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
562 if (this->caps()->writePixelsRowBytesSupport()) {
563 if (rowBytes < SkToSizeT(bpp*rect.width())) {
564 return false;
565 }
566 if (rowBytes % bpp) {
567 return false;
568 }
569 } else {
570 if (rowBytes != SkToSizeT(bpp*rect.width())) {
571 return false;
572 }
573 }
574
575 this->handleDirtyContext();
576 if (!this->onTransferPixelsTo(texture,
577 rect,
578 textureColorType,
579 bufferColorType,
580 std::move(transferBuffer),
581 offset,
582 rowBytes)) {
583 return false;
584 }
585
586 this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect);
588
589 return true;
590}
591
593 SkIRect rect,
594 GrColorType surfaceColorType,
595 GrColorType bufferColorType,
596 sk_sp<GrGpuBuffer> transferBuffer,
597 size_t offset) {
598 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
600 SkASSERT(transferBuffer);
601 SkASSERT(transferBuffer->intendedType() == GrGpuBufferType::kXferGpuToCpu);
602 SkASSERT(this->caps()->areColorTypeAndFormatCompatible(surfaceColorType,
603 surface->backendFormat()));
604
605#ifdef SK_DEBUG
606 auto supportedRead = this->caps()->supportedReadPixelsColorType(
607 surfaceColorType, surface->backendFormat(), bufferColorType);
608 SkASSERT(supportedRead.fOffsetAlignmentForTransferBuffer);
609 SkASSERT(offset % supportedRead.fOffsetAlignmentForTransferBuffer == 0);
610#endif
611
612 // We require that the write region is contained in the texture
613 if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
614 return false;
615 }
616
617 this->handleDirtyContext();
618 if (!this->onTransferPixelsFrom(surface,
619 rect,
620 surfaceColorType,
621 bufferColorType,
622 std::move(transferBuffer),
623 offset)) {
624 return false;
625 }
626
628
629 return true;
630}
631
633 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
635 SkASSERT(this->caps()->mipmapSupport());
636 SkASSERT(texture->mipmapped() == skgpu::Mipmapped::kYes);
637 if (!texture->mipmapsAreDirty()) {
638 // This can happen when the proxy expects mipmaps to be dirty, but they are not dirty on the
639 // actual target. This may be caused by things that the drawingManager could not predict,
640 // i.e., ops that don't draw anything, aborting a draw for exceptional circumstances, etc.
641 // NOTE: This goes away once we quit tracking mipmap state on the actual texture.
642 return true;
643 }
644 if (texture->readOnly()) {
645 return false;
646 }
647 if (this->onRegenerateMipMapLevels(texture)) {
648 texture->markMipmapsClean();
649 return true;
650 }
651 return false;
652}
653
658
661 this->handleDirtyContext();
662 this->onResolveRenderTarget(target, resolveRect);
663}
664
666 uint32_t mipLevels) const {
668 SkASSERT(!surface->readOnly());
669 // Mark any MIP chain as dirty if and only if there is a non-empty bounds.
670 if (nullptr == bounds || !bounds->isEmpty()) {
672 if (texture) {
673 if (mipLevels == 1) {
675 } else {
676 texture->markMipmapsClean();
677 }
678 }
679 }
680}
681
684 const GrFlushInfo& info,
685 const skgpu::MutableTextureState* newState) {
686 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
687
688 GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();
689
690 std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores(
691 new std::unique_ptr<GrSemaphore>[info.fNumSemaphores]);
692 if (this->caps()->backendSemaphoreSupport() && info.fNumSemaphores) {
693 for (size_t i = 0; i < info.fNumSemaphores; ++i) {
694 if (info.fSignalSemaphores[i].isInitialized()) {
695 semaphores[i] = resourceProvider->wrapBackendSemaphore(
696 info.fSignalSemaphores[i],
699 // If we failed to wrap the semaphore it means the client didn't give us a valid
700 // semaphore to begin with. Therefore, it is fine to not signal it.
701 if (semaphores[i]) {
702 this->insertSemaphore(semaphores[i].get());
703 }
704 } else {
705 semaphores[i] = resourceProvider->makeSemaphore(false);
706 if (semaphores[i]) {
707 this->insertSemaphore(semaphores[i].get());
708 info.fSignalSemaphores[i] = semaphores[i]->backendSemaphore();
709 }
710 }
711 }
712 }
713
714 if (info.fFinishedProc) {
715 this->addFinishedProc(info.fFinishedProc, info.fFinishedContext);
716 }
717
718 if (info.fSubmittedProc) {
719 fSubmittedProcs.emplace_back(info.fSubmittedProc, info.fSubmittedContext);
720 }
721
722 // We currently don't support passing in new surface state for multiple proxies here. The only
723 // time we have multiple proxies is if we are flushing a yuv SkImage which won't have state
724 // updates anyways.
725 SkASSERT(!newState || proxies.size() == 1);
727 this->prepareSurfacesForBackendAccessAndStateUpdates(proxies, access, newState);
728}
729
731 GrRenderTarget* renderTarget,
732 bool useMSAASurface,
733 GrAttachment* stencil,
734 GrSurfaceOrigin origin,
735 const SkIRect& bounds,
736 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
738 const TArray<GrSurfaceProxy*, true>& sampledProxies,
739 GrXferBarrierFlags renderPassXferBarriers) {
740#if SK_HISTOGRAMS_ENABLED
741 fCurrentSubmitRenderPassCount++;
742#endif
744 return this->onGetOpsRenderPass(renderTarget, useMSAASurface, stencil, origin, bounds,
745 colorInfo, stencilInfo, sampledProxies, renderPassXferBarriers);
746}
747
749 this->stats()->incNumSubmitToGpus();
750
751 if (auto manager = this->stagingBufferManager()) {
752 manager->detachBuffers();
753 }
754
755 if (auto uniformsBuffer = this->uniformsRingBuffer()) {
756 uniformsBuffer->startSubmit(this);
757 }
758
759 bool submitted = this->onSubmitToGpu(sync);
760
761 this->callSubmittedProcs(submitted);
762
763 this->reportSubmitHistograms();
764
765 return submitted;
766}
767
// Records per-submit histogram samples (currently the number of render passes
// issued since the last submit) and resets the counters. Compiled to a
// NOTE(review): this rendering appears to be missing a trailing statement
// (likely a virtual hook call) dropped by the doc extraction — verify against
// the original source before relying on this body.
void GrGpu::reportSubmitHistograms() {
#if SK_HISTOGRAMS_ENABLED
    // The max allowed value for SK_HISTOGRAM_EXACT_LINEAR is 100. If we want to support higher
    // values we can add SK_HISTOGRAM_CUSTOM_COUNTS but this has a number of buckets that is less
    // than the number of actual values
    static constexpr int kMaxRenderPassBucketValue = 100;
    SK_HISTOGRAM_EXACT_LINEAR("SubmitRenderPasses",
                              std::min(fCurrentSubmitRenderPassCount, kMaxRenderPassBucketValue),
                              kMaxRenderPassBucketValue);
    fCurrentSubmitRenderPassCount = 0;
#endif

}
782
784 if (fOOMed) {
785 fOOMed = false;
786 return true;
787 }
788 return false;
789}
790
791void GrGpu::callSubmittedProcs(bool success) {
792 for (int i = 0; i < fSubmittedProcs.size(); ++i) {
793 fSubmittedProcs[i].fProc(fSubmittedProcs[i].fContext, success);
794 }
795 fSubmittedProcs.clear();
796}
797
798#ifdef SK_ENABLE_DUMP_GPU
800
// Writes this GrGpu's state as a JSON object via `writer`, delegating the
// backend-specific contents to onDumpJSON().
void GrGpu::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

    // TODO: Is there anything useful in the base class to dump here?

    this->onDumpJSON(writer);

    writer->endObject();
}
810#else
// No-op stub used when SK_ENABLE_DUMP_GPU is not defined.
void GrGpu::dumpJSON(SkJSONWriter* writer) const { }
812#endif
813
814#if defined(GR_TEST_UTILS)
815
816#if GR_GPU_STATS
817
// Appends a human-readable summary of every tracked GPU statistic to `out`.
// Test-utility only (GR_TEST_UTILS + GR_GPU_STATS builds).
void GrGpu::Stats::dump(SkString* out) {
    out->appendf("Textures Created: %d\n", fTextureCreates);
    out->appendf("Texture Uploads: %d\n", fTextureUploads);
    out->appendf("Transfers to Texture: %d\n", fTransfersToTexture);
    out->appendf("Transfers from Surface: %d\n", fTransfersFromSurface);
    out->appendf("Stencil Buffer Creates: %d\n", fStencilAttachmentCreates);
    out->appendf("MSAA Attachment Creates: %d\n", fMSAAAttachmentCreates);
    out->appendf("Number of draws: %d\n", fNumDraws);
    out->appendf("Number of Scratch Textures reused %d\n", fNumScratchTexturesReused);
    out->appendf("Number of Scratch MSAA Attachments reused %d\n",
                 fNumScratchMSAAAttachmentsReused);
    out->appendf("Number of Render Passes: %d\n", fRenderPasses);
    out->appendf("Reordered DAGs Over Budget: %d\n", fNumReorderedDAGsOverBudget);

    // enable this block to output CSV-style stats for program pre-compilation
#if 0
    SkASSERT(fNumInlineCompilationFailures == 0);
    SkASSERT(fNumPreCompilationFailures == 0);
    SkASSERT(fNumCompilationFailures == 0);
    SkASSERT(fNumPartialCompilationSuccesses == 0);

    SkDebugf("%d, %d, %d, %d, %d\n",
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fNumCompilationSuccesses);
#endif
}
847
848void GrGpu::Stats::dumpKeyValuePairs(TArray<SkString>* keys, TArray<double>* values) {
849 keys->push_back(SkString("render_passes"));
850 values->push_back(fRenderPasses);
851 keys->push_back(SkString("reordered_dags_over_budget"));
852 values->push_back(fNumReorderedDAGsOverBudget);
853}
854
855#endif // GR_GPU_STATS
856#endif // defined(GR_TEST_UTILS)
857
859 SkTextureCompressionType compressionType,
860 skgpu::Mipmapped mipmapped,
861 const void* data,
862 size_t length) {
863 size_t computedSize = SkCompressedDataSize(
864 compressionType, dimensions, nullptr, mipmapped == skgpu::Mipmapped::kYes);
865 return computedSize == length;
866}
867
869 const GrBackendFormat& format,
870 GrRenderable renderable,
871 skgpu::Mipmapped mipmapped,
872 GrProtected isProtected,
873 std::string_view label) {
874 const GrCaps* caps = this->caps();
875
876 if (!format.isValid()) {
877 return {};
878 }
879
881 // Compressed formats must go through the createCompressedBackendTexture API
882 return {};
883 }
884
885 if (dimensions.isEmpty() || dimensions.width() > caps->maxTextureSize() ||
886 dimensions.height() > caps->maxTextureSize()) {
887 return {};
888 }
889
890 if (mipmapped == skgpu::Mipmapped::kYes && !this->caps()->mipmapSupport()) {
891 return {};
892 }
893
894 return this->onCreateBackendTexture(
895 dimensions, format, renderable, mipmapped, isProtected, label);
896}
897
899 sk_sp<skgpu::RefCntedCallback> finishedCallback,
900 std::array<float, 4> color) {
901 if (!backendTexture.isValid()) {
902 return false;
903 }
904
905 if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
906 return false;
907 }
908
909 return this->onClearBackendTexture(backendTexture, std::move(finishedCallback), color);
910}
911
913 const GrBackendFormat& format,
914 skgpu::Mipmapped mipmapped,
915 GrProtected isProtected) {
916 const GrCaps* caps = this->caps();
917
918 if (!format.isValid()) {
919 return {};
920 }
921
923 if (compressionType == SkTextureCompressionType::kNone) {
924 // Uncompressed formats must go through the createBackendTexture API
925 return {};
926 }
927
928 if (dimensions.isEmpty() ||
929 dimensions.width() > caps->maxTextureSize() ||
930 dimensions.height() > caps->maxTextureSize()) {
931 return {};
932 }
933
934 if (mipmapped == skgpu::Mipmapped::kYes && !this->caps()->mipmapSupport()) {
935 return {};
936 }
937
938 return this->onCreateCompressedBackendTexture(dimensions, format, mipmapped, isProtected);
939}
940
942 sk_sp<skgpu::RefCntedCallback> finishedCallback,
943 const void* data,
944 size_t length) {
945 SkASSERT(data);
946
947 if (!backendTexture.isValid()) {
948 return false;
949 }
950
951 GrBackendFormat format = backendTexture.getBackendFormat();
952
954 if (compressionType == SkTextureCompressionType::kNone) {
955 // Uncompressed formats must go through the createBackendTexture API
956 return false;
957 }
958
959 if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
960 return false;
961 }
962
963 skgpu::Mipmapped mipmapped =
964 backendTexture.hasMipmaps() ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo;
965
966 if (!CompressedDataIsCorrect(backendTexture.dimensions(),
967 compressionType,
968 mipmapped,
969 data,
970 length)) {
971 return false;
972 }
973
974 return this->onUpdateCompressedBackendTexture(backendTexture,
975 std::move(finishedCallback),
976 data,
977 length);
978}
static void info(const char *fmt,...) SK_PRINTF_LIKE(1
Definition DM.cpp:213
SkTextureCompressionType GrBackendFormatToCompressionType(const GrBackendFormat &format)
static bool validate_texel_levels(SkISize dimensions, GrColorType texelColorType, const GrMipLevel *texels, int mipLevelCount, const GrCaps *caps)
Definition GrGpu.cpp:55
GrWrapCacheable
Definition GrTypesPriv.h:84
static constexpr size_t GrColorTypeBytesPerPixel(GrColorType ct)
GrIOType
@ kWrite_GrIOType
GrWrapOwnership
Definition GrTypesPriv.h:76
@ kBorrow_GrWrapOwnership
Definition GrTypesPriv.h:78
GrGpuBufferType
GrTextureType
GrColorType
GrAccessPattern
@ kStatic_GrAccessPattern
GrSurfaceOrigin
Definition GrTypes.h:147
@ kTopLeft_GrSurfaceOrigin
Definition GrTypes.h:148
static const uint32_t kAll_GrBackendState
Definition GrTypes.h:176
GrSyncCpu
Definition GrTypes.h:239
GrXferBarrierFlags
SkColor4f color
#define SkASSERT(cond)
Definition SkAssert.h:116
size_t SkCompressedDataSize(SkTextureCompressionType type, SkISize dimensions, TArray< size_t > *individualMipOffsets, bool mipmapped)
void SK_SPI SkDebugf(const char format[],...) SK_PRINTF_LIKE(1
static int SkCLZ(uint32_t mask)
Definition SkMathPriv.h:186
const Context & fContext
SkFilterMode
constexpr size_t SkToSizeT(S x)
Definition SkTo.h:31
#define ATRACE_ANDROID_FRAMEWORK_ALWAYS(fmt,...)
#define TRACE_FUNC
#define SK_HISTOGRAM_EXACT_LINEAR(name, sample, value_max)
Definition SkTypes.h:115
bool isValid() const
GrBackendFormat getBackendFormat() const
SkISize dimensions() const
GrBackendFormat getBackendFormat() const
bool hasMipmaps() const
GrTextureType textureType() const
bool shouldInitializeTextures() const
Definition GrCaps.h:386
virtual bool isFormatTexturable(const GrBackendFormat &, GrTextureType) const =0
int maxTextureSize() const
Definition GrCaps.h:229
int maxRenderTargetSize() const
Definition GrCaps.h:223
virtual bool isFormatRenderable(const GrBackendFormat &format, int sampleCount) const =0
bool isFormatCompressed(const GrBackendFormat &format) const
Definition GrCaps.cpp:457
SupportedRead supportedReadPixelsColorType(GrColorType srcColorType, const GrBackendFormat &srcFormat, GrColorType dstColorType) const
Definition GrCaps.cpp:366
virtual int getRenderTargetSampleCount(int requestedCount, const GrBackendFormat &) const =0
bool msaaResolvesAutomatically() const
Definition GrCaps.h:100
bool writePixelsRowBytesSupport() const
Definition GrCaps.h:354
GrResourceProvider * resourceProvider()
GrDirectContextPriv priv()
void incNumSubmitToGpus()
Definition GrGpu.h:543
void incBufferTransfers()
Definition GrGpu.h:537
void incTransfersFromSurface()
Definition GrGpu.h:538
void incTransfersToTexture()
Definition GrGpu.h:536
void incTextureCreates()
Definition GrGpu.h:534
void incTextureUploads()
Definition GrGpu.h:535
void incRenderPasses()
Definition GrGpu.h:546
bool writePixels(GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount, bool prepForTexSampling=false)
Definition GrGpu.cpp:461
Stats * stats()
Definition GrGpu.h:551
virtual bool onReadPixels(GrSurface *, SkIRect, GrColorType surfaceColorType, GrColorType dstColorType, void *, size_t rowBytes)=0
virtual sk_sp< GrTexture > onCreateTexture(SkISize dimensions, const GrBackendFormat &, GrRenderable, int renderTargetSampleCnt, skgpu::Budgeted, GrProtected, int mipLevelCoont, uint32_t levelClearMask, std::string_view label)=0
virtual bool onTransferPixelsTo(GrTexture *, SkIRect, GrColorType textureColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer > transferBuffer, size_t offset, size_t rowBytes)=0
virtual GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions, const GrBackendFormat &, skgpu::Mipmapped, GrProtected)=0
virtual void insertSemaphore(GrSemaphore *semaphore)=0
bool readPixels(GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType dstColorType, void *buffer, size_t rowBytes)
Definition GrGpu.cpp:426
virtual void onReportSubmitHistograms()
Definition GrGpu.h:849
virtual GrBackendTexture onCreateBackendTexture(SkISize dimensions, const GrBackendFormat &, GrRenderable, skgpu::Mipmapped, GrProtected, std::string_view label)=0
bool submitToGpu(GrSyncCpu sync)
Definition GrGpu.cpp:748
void dumpJSON(SkJSONWriter *) const
Definition GrGpu.cpp:811
virtual bool onCopySurface(GrSurface *dst, const SkIRect &dstRect, GrSurface *src, const SkIRect &srcRect, GrSamplerState::Filter)=0
const GrCaps * caps() const
Definition GrGpu.h:73
bool regenerateMipMapLevels(GrTexture *)
Definition GrGpu.cpp:632
GrBackendTexture createCompressedBackendTexture(SkISize dimensions, const GrBackendFormat &, skgpu::Mipmapped, GrProtected)
Definition GrGpu.cpp:912
virtual GrOpsRenderPass * onGetOpsRenderPass(GrRenderTarget *renderTarget, bool useMSAASurface, GrAttachment *stencil, GrSurfaceOrigin, const SkIRect &bounds, const GrOpsRenderPass::LoadAndStoreInfo &, const GrOpsRenderPass::StencilLoadAndStoreInfo &, const skia_private::TArray< GrSurfaceProxy *, true > &sampledProxies, GrXferBarrierFlags renderPassXferBarriers)=0
virtual bool onTransferFromBufferToBuffer(sk_sp< GrGpuBuffer > src, size_t srcOffset, sk_sp< GrGpuBuffer > dst, size_t dstOffset, size_t size)=0
void didWriteToSurface(GrSurface *surface, GrSurfaceOrigin origin, const SkIRect *bounds, uint32_t mipLevels=1) const
Definition GrGpu.cpp:665
virtual bool onTransferPixelsFrom(GrSurface *, SkIRect, GrColorType surfaceColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer > transferBuffer, size_t offset)=0
virtual bool onWritePixels(GrSurface *, SkIRect, GrColorType surfaceColorType, GrColorType srcColorType, const GrMipLevel[], int mipLevelCount, bool prepForTexSampling)=0
virtual sk_sp< GrTexture > onWrapBackendTexture(const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable, GrIOType)=0
virtual sk_sp< GrRenderTarget > onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo &, const GrVkDrawableInfo &)
Definition GrGpu.cpp:387
bool updateCompressedBackendTexture(const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, const void *data, size_t length)
Definition GrGpu.cpp:941
GrGpu(GrDirectContext *direct)
Definition GrGpu.cpp:41
virtual sk_sp< GrTexture > onWrapCompressedBackendTexture(const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable)=0
void resolveRenderTarget(GrRenderTarget *, const SkIRect &resolveRect)
Definition GrGpu.cpp:659
bool checkAndResetOOMed()
Definition GrGpu.cpp:783
sk_sp< GrTexture > wrapBackendTexture(const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable, GrIOType)
Definition GrGpu.cpp:297
virtual void prepareSurfacesForBackendAccessAndStateUpdates(SkSpan< GrSurfaceProxy * > proxies, SkSurfaces::BackendSurfaceAccess access, const skgpu::MutableTextureState *newState)
Definition GrGpu.h:841
virtual ~GrGpu()
Definition GrGpu.cpp:43
virtual void addFinishedProc(GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext)=0
virtual bool onClearBackendTexture(const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, std::array< float, 4 > color)=0
virtual sk_sp< GrGpuBuffer > onCreateBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern)=0
virtual bool onRegenerateMipMapLevels(GrTexture *)=0
bool transferPixelsFrom(GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer > transferBuffer, size_t offset)
Definition GrGpu.cpp:592
void resetTextureBindings()
Definition GrGpu.cpp:654
bool transferFromBufferToBuffer(sk_sp< GrGpuBuffer > src, size_t srcOffset, sk_sp< GrGpuBuffer > dst, size_t dstOffset, size_t size)
Definition GrGpu.cpp:511
virtual bool onSubmitToGpu(GrSyncCpu sync)=0
virtual sk_sp< GrRenderTarget > onWrapBackendRenderTarget(const GrBackendRenderTarget &)=0
sk_sp< GrGpuBuffer > createBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern accessPattern)
Definition GrGpu.cpp:393
DisconnectType
Definition GrGpu.h:80
virtual GrStagingBufferManager * stagingBufferManager()
Definition GrGpu.h:76
virtual void onResetTextureBindings()
Definition GrGpu.h:735
sk_sp< GrRenderTarget > wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo &, const GrVkDrawableInfo &)
Definition GrGpu.cpp:382
virtual void onResolveRenderTarget(GrRenderTarget *target, const SkIRect &resolveRect)=0
GrBackendTexture createBackendTexture(SkISize dimensions, const GrBackendFormat &, GrRenderable, skgpu::Mipmapped, GrProtected, std::string_view label)
Definition GrGpu.cpp:868
sk_sp< GrRenderTarget > wrapBackendRenderTarget(const GrBackendRenderTarget &)
Definition GrGpu.cpp:366
virtual sk_sp< GrTexture > onWrapRenderableBackendTexture(const GrBackendTexture &, int sampleCnt, GrWrapOwnership, GrWrapCacheable)=0
static bool CompressedDataIsCorrect(SkISize dimensions, SkTextureCompressionType, skgpu::Mipmapped, const void *data, size_t length)
Definition GrGpu.cpp:858
virtual sk_sp< GrTexture > onCreateCompressedTexture(SkISize dimensions, const GrBackendFormat &, skgpu::Budgeted, skgpu::Mipmapped, GrProtected, const void *data, size_t dataSize)=0
bool copySurface(GrSurface *dst, const SkIRect &dstRect, GrSurface *src, const SkIRect &srcRect, GrSamplerState::Filter filter)
Definition GrGpu.cpp:410
void executeFlushInfo(SkSpan< GrSurfaceProxy * >, SkSurfaces::BackendSurfaceAccess access, const GrFlushInfo &, const skgpu::MutableTextureState *newState)
Definition GrGpu.cpp:682
virtual GrRingBuffer * uniformsRingBuffer()
Definition GrGpu.h:78
virtual void disconnect(DisconnectType)
Definition GrGpu.cpp:51
sk_sp< GrTexture > wrapCompressedBackendTexture(const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable)
Definition GrGpu.cpp:318
GrOpsRenderPass * getOpsRenderPass(GrRenderTarget *renderTarget, bool useMSAASurface, GrAttachment *stencil, GrSurfaceOrigin, const SkIRect &bounds, const GrOpsRenderPass::LoadAndStoreInfo &, const GrOpsRenderPass::StencilLoadAndStoreInfo &, const skia_private::TArray< GrSurfaceProxy *, true > &sampledProxies, GrXferBarrierFlags renderPassXferBarriers)
Definition GrGpu.cpp:730
virtual bool onUpdateCompressedBackendTexture(const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, const void *data, size_t length)=0
void initCaps(sk_sp< const GrCaps > caps)
Definition GrGpu.cpp:47
bool transferPixelsTo(GrTexture *texture, SkIRect rect, GrColorType textureColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer > transferBuffer, size_t offset, size_t rowBytes)
Definition GrGpu.cpp:540
sk_sp< GrTexture > createTexture(SkISize dimensions, const GrBackendFormat &format, GrTextureType textureType, GrRenderable renderable, int renderTargetSampleCnt, skgpu::Budgeted budgeted, GrProtected isProtected, GrColorType textureColorType, GrColorType srcColorType, const GrMipLevel texels[], int texelLevelCount, std::string_view label)
Definition GrGpu.cpp:189
void handleDirtyContext()
Definition GrGpu.h:677
sk_sp< GrTexture > createCompressedTexture(SkISize dimensions, const GrBackendFormat &format, skgpu::Budgeted budgeted, skgpu::Mipmapped mipmapped, GrProtected isProtected, const void *data, size_t dataSize)
Definition GrGpu.cpp:260
sk_sp< GrTexture > wrapRenderableBackendTexture(const GrBackendTexture &, int sampleCnt, GrWrapOwnership, GrWrapCacheable)
Definition GrGpu.cpp:337
bool clearBackendTexture(const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, std::array< float, 4 > color)
Definition GrGpu.cpp:898
Stats fStats
Definition GrGpu.h:703
std::unique_ptr< GrSemaphore > makeSemaphore(bool isOwned=true)
std::unique_ptr< GrSemaphore > wrapBackendSemaphore(const GrBackendSemaphore &, GrSemaphoreWrapType, GrWrapOwnership=kBorrow_GrWrapOwnership)
void markMipmapsDirty()
Definition GrTexture.cpp:25
GrTexture * asTexture() override
Definition GrTexture.h:33
void beginObject(const char *name=nullptr, bool multiline=true)
constexpr size_t size() const
Definition SkSpan_impl.h:95
int size() const
Definition SkTArray.h:416
T & emplace_back(Args &&... args)
Definition SkTArray.h:243
VkSurfaceKHR surface
Definition main.cc:49
static const uint8_t buffer[]
uint32_t uint32_t * format
uint32_t * target
size_t length
FlTexture * texture
BackendSurfaceAccess
Definition SkSurface.h:44
@ kNoAccess
back-end surface will not be used by client
Budgeted
Definition GpuTypes.h:35
Renderable
Definition GpuTypes.h:69
Mipmapped
Definition GpuTypes.h:53
Protected
Definition GpuTypes.h:61
SkScalar w
SkScalar h
Point offset
GrGpuFinishedProc fFinishedProc
Definition GrTypes.h:219
const void * fPixels
static constexpr SkIRect MakeSize(const SkISize &size)
Definition SkRect.h:66
bool contains(int32_t x, int32_t y) const
Definition SkRect.h:463
bool isEmpty() const
Definition SkSize.h:31
int32_t fHeight
Definition SkSize.h:18
int32_t fWidth
Definition SkSize.h:17
constexpr int32_t width() const
Definition SkSize.h:36
constexpr int32_t height() const
Definition SkSize.h:37
#define TRACE_EVENT0(category_group, name)