GrGpu.h
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpu_DEFINED
#define GrGpu_DEFINED

#include "include/core/SkData.h"
#include "include/core/SkRect.h"
#include "include/core/SkSpan.h"
#include "include/gpu/GrTypes.h"
#include "src/gpu/ganesh/GrGpuBuffer.h"  // IWYU pragma: keep

#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string_view>
class GrAttachment;
class GrBackendSemaphore;
class GrDirectContext;
class GrGLContext;
class GrProgramDesc;
class GrProgramInfo;
class GrRenderTarget;
class GrRingBuffer;
class GrSemaphore;
class GrStagingBufferManager;
class GrSurface;
class GrSurfaceProxy;
class GrTexture;
class GrThreadSafePipelineBuilder;
class SkJSONWriter;
class SkString;
struct GrVkDrawableInfo;
struct SkISize;
struct SkImageInfo;

namespace SkSurfaces {
enum class BackendSurfaceAccess;
}
namespace skgpu {
class MutableTextureState;
class RefCntedCallback;
}  // namespace skgpu
class GrGpu {
public:
    GrGpu(GrDirectContext* direct);
    virtual ~GrGpu();

    GrDirectContext* getContext() { return fContext; }
    const GrDirectContext* getContext() const { return fContext; }

    /**
     * Gets the capabilities of the draw target.
     */
    const GrCaps* caps() const { return fCaps.get(); }
    sk_sp<const GrCaps> refCaps() const { return fCaps; }

    virtual GrStagingBufferManager* stagingBufferManager() { return nullptr; }

    virtual GrRingBuffer* uniformsRingBuffer() { return nullptr; }

    enum class DisconnectType {
        // No cleanup should be attempted; immediately cease making backend API calls.
        kAbandon,
        // Free allocated resources (not known by GrResourceCache) before returning and
        // ensure no backend 3D API calls will be made after disconnect() returns.
        kCleanup,
    };

    // Called by the context when the underlying backend context is already or will be destroyed
    // before GrDirectContext.
    virtual void disconnect(DisconnectType);

    virtual GrThreadSafePipelineBuilder* pipelineBuilder() = 0;
    virtual sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() = 0;

    // Called by GrDirectContext::isContextLost. Returns true if the backend Gpu object has gotten
    // into an unrecoverable, lost state.
    virtual bool isDeviceLost() const { return false; }

    /**
     * The GrGpu object normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the GrGpu that the state was modified and it shouldn't make assumptions
     * about the state.
     */
    void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }

    /**
     * Creates a texture object. If renderable is kYes then the returned texture can
     * be used as a render target by calling GrTexture::asRenderTarget(). Not all
     * pixel configs can be used as render targets. Support for configs as textures
     * or render targets can be checked using GrCaps.
     *
     * @param dimensions             dimensions of the texture to be created.
     * @param format                 the format for the texture (not currently used).
     * @param renderable             should the resulting texture be renderable
     * @param renderTargetSampleCnt  The number of samples to use for rendering if renderable is
     *                               kYes. If renderable is kNo then this must be 1.
     * @param budgeted               does this texture count against the resource cache budget?
     * @param isProtected            should the texture be created as protected.
     * @param texels                 array of mipmap levels containing texel data to load.
     *                               If level i has pixels then it is assumed that its dimensions
     *                               are max(1, floor(dimensions.fWidth / 2^i)) by
     *                               max(1, floor(dimensions.fHeight / 2^i)).
     *                               If texels[i].fPixels == nullptr for all i <= mipLevelCount or
     *                               mipLevelCount is 0 then the texture's contents are
     *                               uninitialized.
     *                               If a level has non-null pixels, its row bytes must be a
     *                               multiple of the config's bytes-per-pixel. The row bytes must
     *                               be tight to the level width if
     *                               !caps->writePixelsRowBytesSupport().
     *                               If mipLevelCount > 1 and texels[i].fPixels != nullptr for any
     *                               i > 0 then all levels must have non-null pixels. All levels
     *                               must have non-null pixels if
     *                               GrCaps::createTextureMustSpecifyAllLevels() is true.
     * @param textureColorType       The color type interpretation of the texture for the purpose
     *                               of uploading texel data.
     * @param srcColorType           The color type of data in texels[].
     * @param texelLevelCount        the number of levels in 'texels'. May be 0, 1, or
     *                               floor(max(log2(dimensions.fWidth), log2(dimensions.fHeight))).
     *                               It must be the latter if
     *                               GrCaps::createTextureMustSpecifyAllLevels() is true.
     * @return The texture object if successful, otherwise nullptr.
     */
    sk_sp<GrTexture> createTexture(SkISize dimensions,
                                   const GrBackendFormat& format,
                                   GrTextureType textureType,
                                   GrRenderable renderable,
                                   int renderTargetSampleCnt,
                                   skgpu::Budgeted budgeted,
                                   GrProtected isProtected,
                                   GrColorType textureColorType,
                                   GrColorType srcColorType,
                                   const GrMipLevel texels[],
                                   int texelLevelCount,
                                   std::string_view label);
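
    // Illustrative sketch (hypothetical caller, not a prescribed usage): assumes a valid
    // GrGpu* `gpu`, a GrCaps-validated `backendFormat`, and `texels` holding
    // `texelLevelCount` tightly-packed mip levels.
    //
    //   sk_sp<GrTexture> tex = gpu->createTexture(SkISize::Make(256, 256),
    //                                             backendFormat,
    //                                             GrTextureType::k2D,
    //                                             GrRenderable::kNo,
    //                                             /*renderTargetSampleCnt=*/1,
    //                                             skgpu::Budgeted::kYes,
    //                                             GrProtected::kNo,
    //                                             GrColorType::kRGBA_8888,  // textureColorType
    //                                             GrColorType::kRGBA_8888,  // srcColorType
    //                                             texels,
    //                                             texelLevelCount,
    //                                             /*label=*/"ExampleTexture");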

    /**
     * Simplified createTexture() interface for when there is no initial texel data to upload.
     */
    sk_sp<GrTexture> createTexture(SkISize dimensions,
                                   const GrBackendFormat& format,
                                   GrTextureType textureType,
                                   GrRenderable renderable,
                                   int renderTargetSampleCnt,
                                   skgpu::Mipmapped mipmapped,
                                   skgpu::Budgeted budgeted,
                                   GrProtected isProtected,
                                   std::string_view label);

    sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             skgpu::Budgeted budgeted,
                                             skgpu::Mipmapped mipmapped,
                                             GrProtected isProtected,
                                             const void* data,
                                             size_t dataSize);

    /**
     * Implements GrResourceProvider::wrapBackendTexture
     */
    sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&,
                                        GrWrapOwnership,
                                        GrWrapCacheable,
                                        GrIOType);

    sk_sp<GrTexture> wrapCompressedBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapRenderableBackendTexture
     */
    sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&,
                                                  int sampleCnt,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapBackendRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&);

    /**
     * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                              const GrVkDrawableInfo&);

    /**
     * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
     *
     * @param size           size of the buffer to create.
     * @param intendedType   hint to the graphics subsystem about what the buffer will be used for.
     * @param accessPattern  hint to the graphics subsystem about how the data will be accessed.
     *
     * @return the buffer if successful, otherwise nullptr.
     */
    sk_sp<GrGpuBuffer> createBuffer(size_t size,
                                    GrGpuBufferType intendedType,
                                    GrAccessPattern accessPattern);
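
    // Illustrative sketch (hypothetical caller): a 64 KB vertex buffer whose contents will be
    // updated frequently, assuming `gpu` is a valid GrGpu*.
    //
    //   sk_sp<GrGpuBuffer> vb = gpu->createBuffer(64 * 1024,
    //                                             GrGpuBufferType::kVertex,
    //                                             kDynamic_GrAccessPattern);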

    /**
     * Resolves MSAA. The resolveRect must already be in the native destination space.
     */
    void resolveRenderTarget(GrRenderTarget*, const SkIRect& resolveRect);

    /**
     * Uses the base level of the texture to recompute the contents of the other levels.
     */
    bool regenerateMipMapLevels(GrTexture*);

    /**
     * If the backend API has stateful texture bindings, this resets them back to defaults.
     */
    void resetTextureBindings();

    /**
     * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
     *
     * @param surface           the surface to read from
     * @param rect              the rectangle of pixels to read
     * @param surfaceColorType  the color type for this use of the surface.
     * @param dstColorType      the color type of the destination buffer.
     * @param buffer            memory to read the rectangle into.
     * @param rowBytes          the number of bytes between consecutive rows. Must be a multiple of
     *                          dstColorType's bytes-per-pixel. Must be tight to width if
     *                          !caps->readPixelsRowBytesSupport().
     *
     * @return true if the read succeeded, false if not. The read can fail
     *         because the surface doesn't support reading, the color type
     *         is not allowed for the format of the surface, or the rectangle
     *         read is not contained in the surface.
     */
    bool readPixels(GrSurface* surface,
                    SkIRect rect,
                    GrColorType surfaceColorType,
                    GrColorType dstColorType,
                    void* buffer,
                    size_t rowBytes);
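
    // Illustrative sketch (hypothetical caller): reading back a 64x64 RGBA region with tight
    // row bytes, assuming `gpu` and `surface` are valid.
    //
    //   SkIRect rect = SkIRect::MakeWH(64, 64);
    //   size_t rowBytes = rect.width() * 4;  // 4 bytes per pixel for kRGBA_8888
    //   std::vector<uint8_t> pixels(rowBytes * rect.height());
    //   bool ok = gpu->readPixels(surface, rect,
    //                             GrColorType::kRGBA_8888,  // surfaceColorType
    //                             GrColorType::kRGBA_8888,  // dstColorType
    //                             pixels.data(), rowBytes);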

    /**
     * Updates the pixels in a rectangle of a surface. No sRGB/linear conversions are performed.
     *
     * @param surface             the surface to write to.
     * @param rect                the rectangle of pixels to overwrite
     * @param surfaceColorType    the color type for this use of the surface.
     * @param srcColorType        the color type of the source buffer.
     * @param texels              array of mipmap levels containing texture data. Row bytes must be
     *                            a multiple of srcColorType's bytes-per-pixel. Must be tight to
     *                            level width if !caps->writePixelsRowBytesSupport().
     * @param mipLevelCount       number of levels in 'texels'
     * @param prepForTexSampling  After doing write pixels should the surface be prepared for
     *                            texture sampling. This is currently only used by Vulkan for
     *                            inline uploads to set that layout back to sampled after doing
     *                            the upload. Inline uploads currently can happen between draws in
     *                            a single op so it is not trivial to break up the OpsTask into
     *                            two tasks when we see an inline upload. However, once we are
     *                            able to support doing that we can remove this parameter.
     *
     * @return true if the write succeeded, false if not. The write can fail
     *         because the surface doesn't support writing (e.g. read only),
     *         the color type is not allowed for the format of the surface, or
     *         the rectangle written is not contained in the surface.
     */
    bool writePixels(GrSurface* surface,
                     SkIRect rect,
                     GrColorType surfaceColorType,
                     GrColorType srcColorType,
                     const GrMipLevel texels[],
                     int mipLevelCount,
                     bool prepForTexSampling = false);

    /**
     * Helper for the case of a single level.
     */
    bool writePixels(GrSurface* surface,
                     SkIRect rect,
                     GrColorType surfaceColorType,
                     GrColorType srcColorType,
                     const void* buffer,
                     size_t rowBytes,
                     bool prepForTexSampling = false) {
        GrMipLevel mipLevel = {buffer, rowBytes, nullptr};
        return this->writePixels(surface,
                                 rect,
                                 surfaceColorType,
                                 srcColorType,
                                 &mipLevel,
                                 1,
                                 prepForTexSampling);
    }
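
    // Illustrative sketch (hypothetical caller): uploading a single 32x32 RGBA level with tight
    // row bytes via the single-level helper above, assuming `gpu`, `surface`, and `srcData`
    // are valid.
    //
    //   SkIRect rect = SkIRect::MakeWH(32, 32);
    //   size_t rowBytes = rect.width() * 4;  // kRGBA_8888 is 4 bytes per pixel
    //   bool ok = gpu->writePixels(surface, rect,
    //                              GrColorType::kRGBA_8888,  // surfaceColorType
    //                              GrColorType::kRGBA_8888,  // srcColorType
    //                              srcData, rowBytes);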

    /**
     * Transfer bytes from one GPU buffer to another. The src buffer must have type kXferCpuToGpu
     * and the dst buffer must not. Neither buffer may currently be mapped. The offsets and size
     * must be aligned to GrCaps::transferFromBufferToBufferAlignment.
     *
     * @param src        the buffer to read from
     * @param srcOffset  the aligned offset at the src at which the transfer begins.
     * @param dst        the buffer to write to
     * @param dstOffset  the aligned offset in the dst at which the transfer begins
     * @param size       the aligned number of bytes to transfer.
     */
    bool transferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                    size_t srcOffset,
                                    sk_sp<GrGpuBuffer> dst,
                                    size_t dstOffset,
                                    size_t size);
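
    // Illustrative sketch (hypothetical caller): both offsets and the size are assumed to
    // already satisfy GrCaps::transferFromBufferToBufferAlignment.
    //
    //   bool ok = gpu->transferFromBufferToBuffer(xferCpuToGpuBuffer, /*srcOffset=*/0,
    //                                             vertexBuffer, /*dstOffset=*/0,
    //                                             /*size=*/4096);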

    /**
     * Updates the pixels in a rectangle of a texture using a buffer. If the texture is MIP mapped,
     * the base level is written to.
     *
     * @param texture           the texture to write to.
     * @param rect              the rectangle of pixels in the texture to overwrite
     * @param textureColorType  the color type for this use of the surface.
     * @param bufferColorType   the color type of the transfer buffer's pixel data
     * @param transferBuffer    GrBuffer to read pixels from (type must be "kXferCpuToGpu")
     * @param offset            offset from the start of the buffer
     * @param rowBytes          number of bytes between consecutive rows in the buffer. Must be a
     *                          multiple of bufferColorType's bytes-per-pixel. Must be tight to
     *                          rect.width() if !caps->writePixelsRowBytesSupport().
     */
    bool transferPixelsTo(GrTexture* texture,
                          SkIRect rect,
                          GrColorType textureColorType,
                          GrColorType bufferColorType,
                          sk_sp<GrGpuBuffer> transferBuffer,
                          size_t offset,
                          size_t rowBytes);

    /**
     * Reads the pixels from a rectangle of a surface into a buffer. Use
     * GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer to determine the requirements for
     * the buffer offset alignment. If the surface is a MIP mapped texture, the base level is read.
     *
     * If successful the row bytes in the buffer is always:
     *   GrColorTypeBytesPerPixel(bufferColorType) * rect.width()
     *
     * Asserts that the caller has passed a properly aligned offset and that the buffer is
     * large enough to hold the result.
     *
     * @param surface           the surface to read from.
     * @param rect              the rectangle of pixels to read
     * @param surfaceColorType  the color type for this use of the surface.
     * @param bufferColorType   the color type of the transfer buffer's pixel data
     * @param transferBuffer    GrBuffer to write pixels to (type must be "kXferGpuToCpu")
     * @param offset            offset from the start of the buffer
     */
    bool transferPixelsFrom(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer> transferBuffer,
                            size_t offset);
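
    // Illustrative sketch (hypothetical caller): the destination buffer is assumed to hold at
    // least GrColorTypeBytesPerPixel(bufferColorType) * rect.width() * rect.height() bytes,
    // starting at an offset that satisfies
    // GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer.
    //
    //   bool ok = gpu->transferPixelsFrom(surface, SkIRect::MakeWH(128, 128),
    //                                     GrColorType::kRGBA_8888,  // surfaceColorType
    //                                     GrColorType::kRGBA_8888,  // bufferColorType
    //                                     xferGpuToCpuBuffer, /*offset=*/0);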

    // Called to perform a surface-to-surface copy. Fallbacks to issuing a draw from the src to the
    // dst take place at higher levels and this function implements faster copy paths. The src and
    // dst rects are pre-clipped. The src rect and dst rect are guaranteed to be within the src/dst
    // bounds and non-empty. They must also be in their exact device space coords, including
    // already being transformed for origin if need be. If canDiscardOutsideDstRect is set to true
    // then we don't need to preserve any data on the dst surface outside of the copy.
    //
    // Backends may or may not support src and dst rects with differing dimensions. This can assume
    // that GrCaps.canCopySurface() returned true for these surfaces and rects.
    bool copySurface(GrSurface* dst, const SkIRect& dstRect,
                     GrSurface* src, const SkIRect& srcRect,
                     GrSamplerState::Filter filter);

    // Returns a GrOpsRenderPass which OpsTasks send draw commands to instead of directly
    // to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
    // If a 'stencil' is provided it will be the one bound to 'renderTarget'. If one is not
    // provided but 'renderTarget' has a stencil buffer then that is a signal that the
    // render target's stencil buffer should be ignored.
    GrOpsRenderPass* getOpsRenderPass(GrRenderTarget* renderTarget,
                                      bool useMSAASurface,
                                      GrAttachment* stencil,
                                      GrSurfaceOrigin,
                                      const SkIRect& bounds,
                                      const GrOpsRenderPass::LoadAndStoreInfo&,
                                      const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                      const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
                                      GrXferBarrierFlags renderPassXferBarriers);

    // Called by GrDrawingManager when flushing.
    // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
    // insert any numSemaphore semaphores on the gpu and set the backendSemaphores to match the
    // inserted semaphores.
    void executeFlushInfo(SkSpan<GrSurfaceProxy*>,
                          SkSurfaces::BackendSurfaceAccess access,
                          const GrFlushInfo&,
                          const skgpu::MutableTextureState* newState);

    // Called before render tasks are executed during a flush.
    virtual void willExecute() {}

    bool submitToGpu(GrSyncCpu sync);

    virtual void submit(GrOpsRenderPass*) = 0;

    [[nodiscard]] virtual std::unique_ptr<GrSemaphore> makeSemaphore(bool isOwned = true) = 0;
    virtual std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                              GrSemaphoreWrapType,
                                                              GrWrapOwnership) = 0;
    virtual void insertSemaphore(GrSemaphore* semaphore) = 0;
    virtual void waitSemaphore(GrSemaphore* semaphore) = 0;

    virtual void addFinishedProc(GrGpuFinishedProc finishedProc,
                                 GrGpuFinishedContext finishedContext) = 0;
    virtual void checkFinishProcs() = 0;
    virtual void finishOutstandingGpuWork() = 0;

    // NOLINTNEXTLINE(performance-unnecessary-value-param)
    virtual void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) {}

    /**
     * Checks if we detected an OOM from the underlying 3D API and if so returns true and resets
     * the internal OOM state to false. Otherwise, returns false.
     */
    bool checkAndResetOOMed();

    /**
     * Put this texture in a safe and known state for use across multiple contexts. Depending on
     * the backend, this may return a GrSemaphore. If so, other contexts should wait on that
     * semaphore before using this texture.
     */
    virtual std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;

    /**
     * Frees any backend-specific objects that are not currently in use by the GPU. This is called
     * when the client is trying to free up as much GPU memory as possible. We will not release
     * resources connected to programs/pipelines since the cost to recreate those is significantly
     * higher than for other resources.
     */
    virtual void releaseUnlockedBackendObjects() {}

    ///////////////////////////////////////////////////////////////////////////
    // Debugging and Stats

    class Stats {
    public:
#if GR_GPU_STATS
        Stats() = default;

        void reset() { *this = {}; }

        int textureCreates() const { return fTextureCreates; }
        void incTextureCreates() { fTextureCreates++; }

        int textureUploads() const { return fTextureUploads; }
        void incTextureUploads() { fTextureUploads++; }

        int transfersToTexture() const { return fTransfersToTexture; }
        void incTransfersToTexture() { fTransfersToTexture++; }

        int transfersFromSurface() const { return fTransfersFromSurface; }
        void incTransfersFromSurface() { fTransfersFromSurface++; }

        void incBufferTransfers() { fBufferTransfers++; }
        int bufferTransfers() const { return fBufferTransfers; }

        int stencilAttachmentCreates() const { return fStencilAttachmentCreates; }
        void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }

        int msaaAttachmentCreates() const { return fMSAAAttachmentCreates; }
        void incMSAAAttachmentCreates() { fMSAAAttachmentCreates++; }

        int numDraws() const { return fNumDraws; }
        void incNumDraws() { fNumDraws++; }

        int numFailedDraws() const { return fNumFailedDraws; }
        void incNumFailedDraws() { ++fNumFailedDraws; }

        int numSubmitToGpus() const { return fNumSubmitToGpus; }
        void incNumSubmitToGpus() { ++fNumSubmitToGpus; }

        int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
        void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }

        int numScratchMSAAAttachmentsReused() const { return fNumScratchMSAAAttachmentsReused; }
        void incNumScratchMSAAAttachmentsReused() { ++fNumScratchMSAAAttachmentsReused; }

        int renderPasses() const { return fRenderPasses; }
        void incRenderPasses() { fRenderPasses++; }

        int numReorderedDAGsOverBudget() const { return fNumReorderedDAGsOverBudget; }
        void incNumReorderedDAGsOverBudget() { fNumReorderedDAGsOverBudget++; }

#if defined(GR_TEST_UTILS)
        void dump(SkString*);
        void dumpKeyValuePairs(skia_private::TArray<SkString>* keys,
                               skia_private::TArray<double>* values);
#endif
    private:
        int fTextureCreates = 0;
        int fTextureUploads = 0;
        int fTransfersToTexture = 0;
        int fTransfersFromSurface = 0;
        int fBufferTransfers = 0;
        int fStencilAttachmentCreates = 0;
        int fMSAAAttachmentCreates = 0;
        int fNumDraws = 0;
        int fNumFailedDraws = 0;
        int fNumSubmitToGpus = 0;
        int fNumScratchTexturesReused = 0;
        int fNumScratchMSAAAttachmentsReused = 0;
        int fRenderPasses = 0;
        int fNumReorderedDAGsOverBudget = 0;

#else  // !GR_GPU_STATS

#if defined(GR_TEST_UTILS)
        void dump(SkString*) {}
        void dumpKeyValuePairs(skia_private::TArray<SkString>*, skia_private::TArray<double>*) {}
#endif
        void incTextureCreates() {}
        void incTextureUploads() {}
        void incTransfersToTexture() {}
        void incBufferTransfers() {}
        void incTransfersFromSurface() {}
        void incStencilAttachmentCreates() {}
        void incMSAAAttachmentCreates() {}
        void incNumDraws() {}
        void incNumFailedDraws() {}
        void incNumSubmitToGpus() {}
        void incNumScratchTexturesReused() {}
        void incNumScratchMSAAAttachmentsReused() {}
        void incRenderPasses() {}
        void incNumReorderedDAGsOverBudget() {}
#endif
    };

    Stats* stats() { return &fStats; }
    void dumpJSON(SkJSONWriter*) const;

    /**
     * Creates a texture directly in the backend API without wrapping it in a GrTexture.
     * Must be matched with a call to deleteBackendTexture().
     *
     * If data is null the texture is uninitialized.
     *
     * If data represents a color then all texture levels are cleared to that color.
     *
     * If data represents pixmaps then it must have either one pixmap or, if mipmapping
     * is specified, a complete MIP hierarchy of pixmaps. Additionally, if provided, the mip
     * levels must be sized correctly according to the MIP sizes implied by dimensions. They
     * must all have the same color type and that color type must be compatible with the
     * texture format.
     */
    GrBackendTexture createBackendTexture(SkISize dimensions,
                                          const GrBackendFormat&,
                                          GrRenderable,
                                          skgpu::Mipmapped,
                                          GrProtected,
                                          std::string_view label);

    bool clearBackendTexture(const GrBackendTexture&,
                             sk_sp<skgpu::RefCntedCallback> finishedCallback,
                             std::array<float, 4> color);

    /**
     * Same as the createBackendTexture case except compressed backend textures can
     * never be renderable.
     */
    GrBackendTexture createCompressedBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    skgpu::Mipmapped,
                                                    GrProtected);

    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                        const void* data,
                                        size_t length);

    virtual bool setBackendTextureState(const GrBackendTexture&,
                                        const skgpu::MutableTextureState&,
                                        skgpu::MutableTextureState* previousState,
                                        // NOLINTNEXTLINE(performance-unnecessary-value-param)
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
        return false;
    }

    virtual bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                             const skgpu::MutableTextureState&,
                                             skgpu::MutableTextureState* previousState,
                                             // NOLINTNEXTLINE(performance-unnecessary-value-param)
                                             sk_sp<skgpu::RefCntedCallback> finishedCallback) {
        return false;
    }

    /**
     * Frees a texture created by createBackendTexture(). If ownership of the backend
     * texture has been transferred to a context using adopt semantics this should not be called.
     */
    virtual void deleteBackendTexture(const GrBackendTexture&) = 0;

    /**
     * In this case we have a program descriptor and a program info but no render target.
     */
    virtual bool compile(const GrProgramDesc&, const GrProgramInfo&) = 0;

    virtual bool precompileShader(const SkData& key, const SkData& data) { return false; }

#if defined(GR_TEST_UTILS)
    /** Check a handle represents an actual texture in the backend API that has not been freed. */
    virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;

    /**
     * Creates a GrBackendRenderTarget that can be wrapped using
     * SkSurfaces::WrapBackendRenderTarget. Ideally this is a non-textureable allocation to
     * differentiate from testing with SkSurfaces::WrapBackendTexture. When sampleCnt > 1 this
     * is used to test client wrapped allocations with MSAA where Skia does not allocate a separate
     * buffer for resolving. If the color is non-null the backing store should be cleared to the
     * passed in color.
     */
    virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(
            SkISize dimensions,
            GrColorType,
            int sampleCount = 1,
            GrProtected = GrProtected::kNo) = 0;

    /**
     * Deletes a GrBackendRenderTarget allocated with the above. Synchronization to make this safe
     * is up to the caller.
     */
    virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;

    // This is only to be used in GL-specific tests.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code.
    virtual void resetShaderCacheForTesting() const {}

    /**
     * Inserted as a pair around a block of code to do a GPU frame capture.
     * Currently only works with the Metal backend.
     */
    virtual void testingOnly_startCapture() {}
    virtual void testingOnly_stopCapture() {}
#endif

    // width and height may be larger than the render target (if the underlying API allows it).
    // Returns nullptr if a compatible stencil attachment could not be created, otherwise the
    // caller owns the ref on the GrAttachment.
    virtual sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& colorFormat,
                                                      SkISize dimensions,
                                                      int numStencilSamples) = 0;

    virtual GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) = 0;

    // Creates an MSAA surface to be used as an MSAA attachment on a framebuffer.
    virtual sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                                   const GrBackendFormat& format,
                                                   int numSamples,
                                                   GrProtected isProtected,
                                                   GrMemoryless isMemoryless) = 0;

    void handleDirtyContext() {
        if (fResetBits) {
            this->resetContext();
        }
    }

    virtual void storeVkPipelineCacheData() {}

    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;

protected:
    static bool CompressedDataIsCorrect(SkISize dimensions,
                                        SkTextureCompressionType,
                                        skgpu::Mipmapped,
                                        const void* data,
                                        size_t length);

    // If the surface is a texture this marks its mipmaps as dirty.
    void didWriteToSurface(GrSurface* surface,
                           GrSurfaceOrigin origin,
                           const SkIRect* bounds,
                           uint32_t mipLevels = 1) const;

    void setOOMed() { fOOMed = true; }

    Stats fStats;

    // Subclass must call this to initialize caps in its constructor.
    void initCaps(sk_sp<const GrCaps> caps);

private:
    virtual GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    GrRenderable,
                                                    skgpu::Mipmapped,
                                                    GrProtected,
                                                    std::string_view label) = 0;

    virtual GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                              const GrBackendFormat&,
                                                              skgpu::Mipmapped,
                                                              GrProtected) = 0;

    virtual bool onClearBackendTexture(const GrBackendTexture&,
                                       sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                       std::array<float, 4> color) = 0;

    virtual bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                                  sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                                  const void* data,
                                                  size_t length) = 0;

    // Called when the 3D context state is unknown. Subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) {}

    // Implementation of resetTextureBindings.
    virtual void onResetTextureBindings() {}

    // Overridden by backend-specific derived class to create objects.
    // Texture size, renderability, format support, and sample count will have already been
    // validated in the base class before onCreateTexture is called.
    // If the ith bit is set in levelClearMask then the ith MIP level should be cleared.
    virtual sk_sp<GrTexture> onCreateTexture(SkISize dimensions,
                                             const GrBackendFormat&,
                                             GrRenderable,
                                             int renderTargetSampleCnt,
                                             skgpu::Budgeted,
                                             GrProtected,
                                             int mipLevelCount,
                                             uint32_t levelClearMask,
                                             std::string_view label) = 0;
    virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                                       const GrBackendFormat&,
                                                       skgpu::Budgeted,
                                                       skgpu::Mipmapped,
                                                       GrProtected,
                                                       const void* data,
                                                       size_t dataSize) = 0;
    virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable,
                                                  GrIOType) = 0;

    virtual sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;

    virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                            int sampleCnt,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) = 0;
    virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                        const GrVkDrawableInfo&);

    virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size,
                                              GrGpuBufferType intendedType,
                                              GrAccessPattern) = 0;

    // Overridden by backend-specific derived class to perform the surface read.
    virtual bool onReadPixels(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType dstColorType,
                              void*,
                              size_t rowBytes) = 0;

    // Overridden by backend-specific derived class to perform the surface write.
    virtual bool onWritePixels(GrSurface*,
                               SkIRect,
                               GrColorType surfaceColorType,
                               GrColorType srcColorType,
                               const GrMipLevel[],
                               int mipLevelCount,
                               bool prepForTexSampling) = 0;

    // Overridden by backend-specific derived class to perform the buffer transfer.
    virtual bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                              size_t srcOffset,
                                              sk_sp<GrGpuBuffer> dst,
                                              size_t dstOffset,
                                              size_t size) = 0;

    // Overridden by backend-specific derived class to perform the texture transfer.
    virtual bool onTransferPixelsTo(GrTexture*,
                                    SkIRect,
                                    GrColorType textureColorType,
                                    GrColorType bufferColorType,
                                    sk_sp<GrGpuBuffer> transferBuffer,
                                    size_t offset,
                                    size_t rowBytes) = 0;

    // Overridden by backend-specific derived class to perform the surface transfer.
    virtual bool onTransferPixelsFrom(GrSurface*,
                                      SkIRect,
                                      GrColorType surfaceColorType,
                                      GrColorType bufferColorType,
                                      sk_sp<GrGpuBuffer> transferBuffer,
                                      size_t offset) = 0;

    // Overridden by backend-specific derived class to perform the resolve.
    virtual void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) = 0;

    // Overridden by backend-specific derived class to perform mip map level regeneration.
    virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;

    // Overridden by backend-specific derived class to perform the copy surface.
    virtual bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
                               GrSurface* src, const SkIRect& srcRect,
                               GrSamplerState::Filter) = 0;

    virtual GrOpsRenderPass* onGetOpsRenderPass(
            GrRenderTarget* renderTarget,
            bool useMSAASurface,
            GrAttachment* stencil,
            GrSurfaceOrigin,
            const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo&,
            const GrOpsRenderPass::StencilLoadAndStoreInfo&,
            const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
            GrXferBarrierFlags renderPassXferBarriers) = 0;

    virtual void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurfaces::BackendSurfaceAccess access,
            const skgpu::MutableTextureState* newState) {}

    virtual bool onSubmitToGpu(GrSyncCpu sync) = 0;

    void reportSubmitHistograms();
    virtual void onReportSubmitHistograms() {}

#ifdef SK_ENABLE_DUMP_GPU
    virtual void onDumpJSON(SkJSONWriter*) const {}
#endif

    sk_sp<GrTexture> createTextureCommon(SkISize,
                                         const GrBackendFormat&,
                                         GrTextureType textureType,
                                         GrRenderable,
                                         int renderTargetSampleCnt,
                                         skgpu::Budgeted,
                                         GrProtected,
                                         int mipLevelCnt,
                                         uint32_t levelClearMask,
                                         std::string_view label);

    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
    }

    void callSubmittedProcs(bool success);

    sk_sp<const GrCaps> fCaps;

    uint32_t fResetBits;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrDirectContext* fContext;

    struct SubmittedProc {
        SubmittedProc(GrGpuSubmittedProc proc, GrGpuSubmittedContext context)
                : fProc(proc), fContext(context) {}

        GrGpuSubmittedProc fProc;
        GrGpuSubmittedContext fContext;
    };

    bool fOOMed = false;

#if SK_HISTOGRAMS_ENABLED
    int fCurrentSubmitRenderPassCount = 0;
#endif

    using INHERITED = SkRefCnt;
};

#endif