// GrGpu.cpp (excerpts)

// GrGpu::~GrGpu(): any submitted-proc callbacks still queued at destruction are reported as failed.
this->callSubmittedProcs(false);
// GrGpu::initCaps(): take ownership of the backend's capability table.
fCaps = std::move(caps);
// validate_texel_levels(): sanity-check the MIP data a caller hands to createTexture()/writePixels().
bool hasBasePixels = texels[0].fPixels;
int levelsWithPixelsCnt = 0;
// ...
for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
    if (texels[currentMipLevel].fPixels) {
        const size_t minRowBytes = w * bpp;
        // With row-bytes support the pitch only has to cover a full row and stay a multiple of
        // the bytes per pixel; without it the pitch must be exactly tight.
        if (texels[currentMipLevel].fRowBytes < minRowBytes) { /* ... */ }
        if (texels[currentMipLevel].fRowBytes % bpp) { /* ... */ }
        if (texels[currentMipLevel].fRowBytes != minRowBytes) { /* ... */ }
        ++levelsWithPixelsCnt;
    }
    // A 1x1 level must be the last one in the chain.
    if (w == 1 && h == 1) {
        if (currentMipLevel != mipLevelCount - 1) { /* ... */ }
    }
    // ...
}
// A multi-level chain must walk all the way down to 1x1.
if (mipLevelCount != 1 && (w != 1 || h != 1)) { /* ... */ }
// Callers may supply no levels, just the base level, or the full chain.
if (!hasBasePixels) {
    return levelsWithPixelsCnt == 0;
}
return levelsWithPixelsCnt == 1 || levelsWithPixelsCnt == mipLevelCount;
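// Illustrative sketch, not part of GrGpu.cpp: the per-level row-pitch rule enforced above,
// written as a standalone helper. The names validateLevelRowBytes and flexibleRowBytes are
// hypothetical; flexibleRowBytes stands in for caps()->writePixelsRowBytesSupport().
#include <cstddef>

static bool validateLevelRowBytes(size_t rowBytes, int width, size_t bpp, bool flexibleRowBytes) {
    const size_t minRowBytes = static_cast<size_t>(width) * bpp;
    if (flexibleRowBytes) {
        // A padded pitch is acceptable as long as it covers a full row and stays pixel-aligned.
        return rowBytes >= minRowBytes && rowBytes % bpp == 0;
    }
    // Without row-bytes support the pitch must be exactly tight.
    return rowBytes == minRowBytes;
}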
// GrGpu::createTextureCommon(..., int renderTargetSampleCnt, ..., uint32_t levelClearMask,
//                            std::string_view label): shared path behind both createTexture() overloads.
// Compressed formats are created through createCompressedTexture() instead.
if (this->caps()->isFormatCompressed(format)) { /* ... */ }
// ...
if (!this->caps()->validateSurfaceParams(dimensions, /* ... */ renderTargetSampleCnt, /* ... */)) { /* ... */ }
// ...
renderTargetSampleCnt = this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, format);
SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64);
// ... onCreateTexture(dimensions, ..., renderTargetSampleCnt, ..., levelClearMask, label) ...
tex->resourcePriv().removeScratchKey();
// ...
// Render targets whose MSAA cannot be resolved automatically need an explicit resolve step.
if (renderTargetSampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) {
    tex->asRenderTarget()->setRequiresManualMSAAResolve();
}
// GrGpu::createTexture(..., int renderTargetSampleCnt, ..., std::string_view label):
// the overload that creates an uninitialized (possibly mipmapped) texture.
int mipLevelCount = 1;
// ... (grown to the full chain length when a mipmapped texture is requested)
uint32_t levelClearMask = this->caps()->shouldInitializeTextures() ? (1 << mipLevelCount) - 1 : 0;
auto tex = this->createTextureCommon(dimensions,
                                     /* ... */
                                     renderTargetSampleCnt,
                                     /* ... */);
// ...
// Levels that were cleared at creation count as valid mipmap content.
    tex->markMipmapsClean();
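// Illustrative sketch, not Skia's implementation verbatim: the overload above grows
// mipLevelCount from 1 to the full chain length when a mipmapped texture is requested,
// i.e. floor(log2(max(w, h))) + 1. A portable equivalent (fullMipLevelCount is hypothetical):
#include <algorithm>
#include <cstdint>

static int fullMipLevelCount(int width, int height) {
    uint32_t largest = static_cast<uint32_t>(std::max(width, height));
    int levels = 1;
    while (largest > 1) {  // halve until the 1x1 level is reached
        largest >>= 1;
        ++levels;
    }
    return levels;         // e.g. 256x128 -> 9 levels
}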
// GrGpu::createTexture(..., int renderTargetSampleCnt, ..., const GrMipLevel texels[],
//                      int texelLevelCount, std::string_view label):
// the overload that also uploads caller-supplied texel data.
if (texelLevelCount) {
    // ... (the provided levels must pass validate_texel_levels())
}

int mipLevelCount = std::max(1, texelLevelCount);
uint32_t levelClearMask = 0;
if (this->caps()->shouldInitializeTextures()) {
    if (texelLevelCount) {
        // Only levels without caller data need to be cleared.
        for (int i = 0; i < mipLevelCount; ++i) {
            if (!texels[i].fPixels) {
                levelClearMask |= static_cast<uint32_t>(1 << i);
            }
        }
    } else {
        levelClearMask = static_cast<uint32_t>((1 << mipLevelCount) - 1);
    }
}

auto tex = this->createTextureCommon(dimensions,
                                     /* ... */
                                     renderTargetSampleCnt,
                                     /* ... */);
// ...
bool markMipLevelsClean = false;
if (texelLevelCount && texels[0].fPixels) {
    // ... (upload the provided levels)
    markMipLevelsClean = (texelLevelCount > 1 && !levelClearMask && texels[1].fPixels);
} else if (levelClearMask && mipLevelCount > 1) {
    markMipLevelsClean = true;
}
if (markMipLevelsClean) {
    tex->markMipmapsClean();
}
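// Illustrative sketch with a hypothetical helper, following the same convention as the code
// above: when the caps require textures to be initialized, build a mask with one bit per MIP
// level that has no caller-supplied pixels, so only those levels are cleared.
#include <cstdint>

struct Level { const void* fPixels; };  // stand-in for GrMipLevel

static uint32_t computeLevelClearMask(const Level* texels, int texelLevelCount, int mipLevelCount) {
    if (texelLevelCount == 0) {
        // No data at all: every level needs a clear.
        return static_cast<uint32_t>((1 << mipLevelCount) - 1);
    }
    uint32_t mask = 0;
    for (int i = 0; i < mipLevelCount; ++i) {
        if (!texels[i].fPixels) {
            mask |= static_cast<uint32_t>(1 << i);
        }
    }
    return mask;
}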
// GrGpu::createCompressedTexture(): reject degenerate or oversized dimensions up front.
if (dimensions.width()  < 1 || dimensions.width()  > this->caps()->maxTextureSize() ||
    dimensions.height() < 1 || dimensions.height() > this->caps()->maxTextureSize()) {
// The wrapped render target is marked framebuffer-only.
rt->setFramebufferOnly();
// GrGpu::createBuffer(): buffers that cannot be reused as scratch drop their scratch key.
if (buffer && !this->caps()->reuseScratchBuffers()) {
    buffer->resourcePriv().removeScratchKey();
}
// GrGpu::copySurface(): a read-only destination can never be copied into.
if (dst->readOnly()) {
// GrGpu::readPixels(): the surface format must be compatible with the surface color type, and
// the destination pitch must be tight unless the caps advertise readPixelsRowBytesSupport().
SkASSERT(this->caps()->areColorTypeAndFormatCompatible(surfaceColorType,
                                                       surface->backendFormat()));
// ...
if (!this->caps()->readPixelsRowBytesSupport()) {
    if (rowBytes != minRowBytes) { /* ... */ }
} else {
    if (rowBytes < minRowBytes) { /* ... */ }
}
// GrGpu::writePixels(..., bool prepForTexSampling):
if (mipLevelCount == 0) {
    /* ... */
} else if (mipLevelCount == 1) {
    // A single-level write may target a sub-rectangle; a multi-level write must cover the
    // whole surface.
    /* ... */
}
// ...
if (this->onWritePixels(/* ... */, prepForTexSampling)) {
// GrGpu::transferFromBufferToBuffer(): both offsets must honor the backend's transfer alignment.
SkASSERT(srcOffset % this->caps()->transferFromBufferToBufferAlignment() == 0);
SkASSERT(dstOffset % this->caps()->transferFromBufferToBufferAlignment() == 0);
// GrGpu::transferPixelsTo(): with row-bytes support the pitch only has to be pixel-aligned and
// large enough; otherwise it must be exactly tight.
if (this->caps()->writePixelsRowBytesSupport()) {
    // ...
    if (rowBytes % bpp) { /* ... */ }
}
// ...
return this->onTransferPixelsTo(/* ... */, std::move(transferBuffer), /* ... */);
// GrGpu::transferPixelsFrom(): the buffer offset must satisfy the alignment the caps report for
// this surface/buffer color-type pair.
SkASSERT(this->caps()->areColorTypeAndFormatCompatible(surfaceColorType,
                                                       surface->backendFormat()));
// ...
auto supportedRead = this->caps()->supportedReadPixelsColorType(
        surfaceColorType, surface->backendFormat(), bufferColorType);
SkASSERT(supportedRead.fOffsetAlignmentForTransferBuffer);
SkASSERT(offset % supportedRead.fOffsetAlignmentForTransferBuffer == 0);
// ...
if (this->onTransferPixelsFrom(/* ... */, std::move(transferBuffer), offset)) {
// GrGpu::regenerateMipMapLevels(): nothing to do when the levels are already valid.
if (!texture->mipmapsAreDirty()) {
// GrGpu::didWriteToSurface(..., uint32_t mipLevels): a write that touched only the base level
// leaves any other MIP levels stale.
if (mipLevels == 1) {
    // ... (mark the texture's mipmaps dirty)
}
// GrGpu::executeFlushInfo(): for each requested signal semaphore either wrap the one the caller
// supplied, or create a new one and hand its backend handle back through the GrFlushInfo.
std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores(
        new std::unique_ptr<GrSemaphore>[info.fNumSemaphores]);
if (this->caps()->backendSemaphoreSupport() && info.fNumSemaphores) {
    for (size_t i = 0; i < info.fNumSemaphores; ++i) {
        if (info.fSignalSemaphores[i].isInitialized()) {
            semaphores[i] = this->wrapBackendSemaphore(
                    info.fSignalSemaphores[i],
                    /* ... */);
            // ...
        } else {
            // ... (make a new semaphore and export it)
            info.fSignalSemaphores[i] = semaphores[i]->backendSemaphore();
        }
    }
}
// ...
if (info.fFinishedProc) {
    // ... (register the finished callback with the backend)
}
if (info.fSubmittedProc) {
    // ... (queued until callSubmittedProcs() runs after the next submit)
}
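// Illustrative sketch with hypothetical types (BackendSemaphore, Semaphore,
// resolveSignalSemaphores); the real flow also inserts each semaphore into the backend's
// command stream. It shows the two cases handled above: wrap a caller-supplied semaphore,
// or create a new one and export its handle back to the caller.
#include <memory>
#include <vector>

struct BackendSemaphore { bool initialized = false; };
struct Semaphore {
    BackendSemaphore handle;
    BackendSemaphore backendSemaphore() const { return handle; }
};

static std::vector<std::unique_ptr<Semaphore>> resolveSignalSemaphores(BackendSemaphore* slots,
                                                                       size_t count) {
    std::vector<std::unique_ptr<Semaphore>> owned(count);
    for (size_t i = 0; i < count; ++i) {
        if (slots[i].initialized) {
            // Caller-provided handle: borrow it; it will be signaled at submit time.
            owned[i] = std::make_unique<Semaphore>(Semaphore{slots[i]});
        } else {
            // Otherwise create a fresh semaphore and hand its handle back to the caller.
            owned[i] = std::make_unique<Semaphore>(Semaphore{BackendSemaphore{true}});
            slots[i] = owned[i]->backendSemaphore();
        }
    }
    return owned;
}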
// GrGpu::getOpsRenderPass(): count render passes per submit for the histogram below.
#if SK_HISTOGRAMS_ENABLED
fCurrentSubmitRenderPassCount++;
#endif
return this->onGetOpsRenderPass(/* ... */,
                                colorInfo, stencilInfo, sampledProxies, renderPassXferBarriers);

// GrGpu::submitToGpu(): flush the uniform ring buffer, submit, then notify callers.
uniformsBuffer->startSubmit(this);
// ...
this->callSubmittedProcs(submitted);
this->reportSubmitHistograms();
void GrGpu::reportSubmitHistograms() {
#if SK_HISTOGRAMS_ENABLED
    // The histogram macro only accepts values up to 100, so clamp the per-submit count.
    static constexpr int kMaxRenderPassBucketValue = 100;
    SK_HISTOGRAM_EXACT_LINEAR(/* ... */,
                              std::min(fCurrentSubmitRenderPassCount, kMaxRenderPassBucketValue),
                              kMaxRenderPassBucketValue);
    fCurrentSubmitRenderPassCount = 0;
#endif
    // ...
}
void GrGpu::callSubmittedProcs(bool success) {
    for (int i = 0; i < fSubmittedProcs.size(); ++i) {
        fSubmittedProcs[i].fProc(fSubmittedProcs[i].fContext, success);
    }
    fSubmittedProcs.clear();
}
// GrGpu::dumpJSON() is compiled in only when SK_ENABLE_DUMP_GPU is defined.
#ifdef SK_ENABLE_DUMP_GPU
// ...
this->onDumpJSON(writer);
#if defined(GR_TEST_UTILS)
out->appendf("Textures Created: %d\n", fTextureCreates);
out->appendf("Texture Uploads: %d\n", fTextureUploads);
out->appendf("Transfers to Texture: %d\n", fTransfersToTexture);
out->appendf("Transfers from Surface: %d\n", fTransfersFromSurface);
out->appendf("Stencil Buffer Creates: %d\n", fStencilAttachmentCreates);
out->appendf("MSAA Attachment Creates: %d\n", fMSAAAttachmentCreates);
out->appendf("Number of draws: %d\n", fNumDraws);
out->appendf("Number of Scratch Textures reused %d\n", fNumScratchTexturesReused);
out->appendf("Number of Scratch MSAA Attachments reused %d\n", fNumScratchMSAAAttachmentsReused);
out->appendf("Number of Render Passes: %d\n", fRenderPasses);
out->appendf("Reordered DAGs Over Budget: %d\n", fNumReorderedDAGsOverBudget);

SkASSERT(fNumInlineCompilationFailures == 0);
SkASSERT(fNumPreCompilationFailures == 0);
SkASSERT(fNumCompilationFailures == 0);
SkASSERT(fNumPartialCompilationSuccesses == 0);
// ...
        fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
        fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
        fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
        fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
        fNumCompilationSuccesses);
values->push_back(fRenderPasses);
// ...
values->push_back(fNumReorderedDAGsOverBudget);
// CompressedDataIsCorrect(): the supplied length must match the size implied by the dimensions,
// compression type, and mipmap status.
return computedSize == length;
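// Illustrative sketch, assuming a non-mipmapped 4x4-block format such as ETC1 or BC1 (8 bytes
// per block); the real check uses SkCompressedDataSize(), which also handles mipmapped data.
// The supplied byte length must match the size implied by the dimensions.
#include <cstddef>

static bool compressedDataIsCorrect4x4(int width, int height, size_t blockBytes, size_t length) {
    const size_t blocksX = (static_cast<size_t>(width)  + 3) / 4;
    const size_t blocksY = (static_cast<size_t>(height) + 3) / 4;
    const size_t computedSize = blocksX * blocksY * blockBytes;
    return computedSize == length;
}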
// GrGpu::createBackendTexture(..., std::string_view label):
// ...
return this->onCreateBackendTexture(
        dimensions, format, renderable, mipmapped, isProtected, label);
// GrGpu::clearBackendTexture(..., std::array<float, 4> color):
if (!backendTexture.isValid()) {
    // ...
}
if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
    // ...
}

// GrGpu::updateCompressedBackendTexture(): the same validity and mipmap checks run before the
// data is handed to the backend.
if (!backendTexture.isValid()) {
    // ...
}
if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
    // ...
}
return this->onUpdateCompressedBackendTexture(backendTexture,
                                              std::move(finishedCallback),
                                              /* ... */);