Flutter Engine
The Flutter Engine
GrDirectContext.h
Go to the documentation of this file.
1/*
2 * Copyright 2020 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#ifndef GrDirectContext_DEFINED
9#define GrDirectContext_DEFINED
10
17#include "include/gpu/GrTypes.h"
18
19#include <chrono>
20#include <cstddef>
21#include <cstdint>
22#include <memory>
23#include <string_view>
24
25class GrAtlasManager;
27class GrBackendFormat;
33class GrGpu;
34class GrResourceCache;
36class SkData;
37class SkImage;
38class SkPixmap;
39class SkSurface;
40class SkTaskGroup;
42enum SkColorType : int;
44struct GrMockOptions;
45struct GrD3DBackendContext; // IWYU pragma: keep
46
47namespace skgpu {
48 class MutableTextureState;
49#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
50 namespace ganesh { class SmallPathAtlasMgr; }
51#endif
52}
53namespace sktext { namespace gpu { class StrikeCache; } }
54namespace wgpu { class Device; } // IWYU pragma: keep
55
56namespace SkSurfaces {
57enum class BackendSurfaceAccess;
58}
59
61public:
62#ifdef SK_DIRECT3D
63 /**
64 * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context
65 * must be kept alive until the returned GrDirectContext is first destroyed or abandoned.
66 */
67 static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&);
68 static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&);
69#endif
70
71 static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
72 static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*);
73
74 ~GrDirectContext() override;
75
76 /**
77 * The context normally assumes that no outsider is setting state
78 * within the underlying 3D API's context/device/whatever. This call informs
79 * the context that the state was modified and it should resend. Shouldn't
80 * be called frequently for good performance.
81 * The flag bits, state, is dependent on which backend is used by the
82 * context, either GL or D3D (possible in future).
83 */
84 void resetContext(uint32_t state = kAll_GrBackendState);
85
86 /**
87 * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
88 * the context has modified the bound texture will have texture id 0 bound. This does not
89 * flush the context. Calling resetContext() does not change the set that will be bound
90 * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
91 * all unit/target combinations are considered to have unmodified bindings until the context
92 * subsequently modifies them (meaning if this is called twice in a row with no intervening
93 * context usage then the second call is a no-op.)
94 */
95 void resetGLTextureBindings();
96
97 /**
98 * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
99 * usable. Call this if you have lost the associated GPU context, and thus internal texture,
100 * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
101 * context and any of its created resource objects will not make backend 3D API calls. Content
102 * rendered but not previously flushed may be lost. After this function is called all subsequent
103 * calls on the context will fail or be no-ops.
104 *
105 * The typical use case for this function is that the underlying 3D context was lost and further
106 * API calls may crash.
107 *
108 * This call is not valid to be made inside ReleaseProcs passed into SkSurface or SkImages. The
109 * call will simply fail (and assert in debug) if it is called while inside a ReleaseProc.
110 *
111 * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
112 * create the context must be kept alive even after abandoning the context. Those objects must
113 * live for the lifetime of the context object itself. The reason for this is so that
114 * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be
115 * cleaned up even in a device lost state.
116 */
117 void abandonContext() override;
118
119 /**
120 * Returns true if the context was abandoned or if the backend specific context has gotten into
121 * an unrecoverable, lost state (e.g. in Vulkan backend if we've gotten a
122 * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this
123 * context.
124 */
125 bool abandoned() override;
126
127 /**
128 * Returns true if the backend specific context has gotten into an unrecoverable, lost state
129 * (e.g. in Vulkan backend if we've gotten a VK_ERROR_DEVICE_LOST). If the backend context is
130 * lost, this call will also abandon this context.
131 */
132 bool isDeviceLost();
133
134 // TODO: Remove this from public after migrating Chrome.
136
137 /**
138 * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is
139 * reset and will return false until another out-of-memory error is reported by the 3D API. If
140 * the context is abandoned then this will report false.
141 *
142 * Currently this is implemented for:
143 *
144 * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and
145 * therefore hide the error from Skia. Also, it is not advised to use this in combination with
146 * enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context from ever
147 * checking the GL context for OOM.
148 *
149 * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has
150 * occurred.
151 */
152 bool oomed();
153
154 /**
155 * This is similar to abandonContext() however the underlying 3D context is not yet lost and
156 * the context will cleanup all allocated resources before returning. After returning it will
157 * assume that the underlying context may no longer be valid.
158 *
159 * The typical use case for this function is that the client is going to destroy the 3D context
160 * but can't guarantee that context will be destroyed first (perhaps because it may be ref'ed
161 * elsewhere by either the client or Skia objects).
162 *
163 * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
164 * create the context must be alive before calling releaseResourcesAndAbandonContext.
165 */
166 void releaseResourcesAndAbandonContext();
167
168 ///////////////////////////////////////////////////////////////////////////
169 // Resource Cache
170
171 /** DEPRECATED
172 * Return the current GPU resource cache limits.
173 *
174 * @param maxResources If non-null, will be set to -1.
175 * @param maxResourceBytes If non-null, returns maximum number of bytes of
176 * video memory that can be held in the cache.
177 */
178 void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;
179
180 /**
181 * Return the current GPU resource cache limit in bytes.
182 */
183 size_t getResourceCacheLimit() const;
184
185 /**
186 * Gets the current GPU resource cache usage.
187 *
188 * @param resourceCount If non-null, returns the number of resources that are held in the
189 * cache.
190 * @param maxResourceBytes If non-null, returns the total number of bytes of video memory held
191 * in the cache.
192 */
193 void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;
194
195 /**
196 * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
197 */
198 size_t getResourceCachePurgeableBytes() const;
199
200 /** DEPRECATED
201 * Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
202 * limit, it will be purged (LRU) to keep the cache within the limit.
203 *
204 * @param maxResources Unused.
205 * @param maxResourceBytes The maximum number of bytes of video memory
206 * that can be held in the cache.
207 */
208 void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
209
210 /**
211 * Specify the GPU resource cache limit. If the cache currently exceeds this limit,
212 * it will be purged (LRU) to keep the cache within the limit.
213 *
214 * @param maxResourceBytes The maximum number of bytes of video memory
215 * that can be held in the cache.
216 */
217 void setResourceCacheLimit(size_t maxResourceBytes);
218
219 /**
220 * Frees GPU resources created by the context. Can be called to reduce GPU memory
221 * pressure.
222 */
223 void freeGpuResources();
224
225 /**
226 * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
227 * otherwise marked for deletion, regardless of whether the context is under budget.
228
229 *
230 * @param msNotUsed Only unlocked resources not used in these last milliseconds will be
231 * cleaned up.
232 * @param opts Specify which resources should be cleaned up. If kScratchResourcesOnly
233 * then, all unlocked scratch resources older than 'msNotUsed' will be purged
234 * but the unlocked resources with persistent data will remain. If
235 * kAllResources is specified, all unlocked resources older than 'msNotUsed'
 * will be purged.
236 */
237
238 void performDeferredCleanup(
239 std::chrono::milliseconds msNotUsed,
241
242    // Temporary compatibility API for Android.
       // Deprecated alias: forwards msNotUsed straight to performDeferredCleanup(),
       // relying on that function's default options for the remaining parameters —
       // new code should call performDeferredCleanup() directly.
243    void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
244        this->performDeferredCleanup(msNotUsed);
245    }
246
247 /**
248 * Purge unlocked resources from the cache until the provided byte count has been reached
249 * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
250 * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
251 * resource types.
252 *
253 * @param bytesToPurge the desired number of bytes to be purged.
254 * @param preferScratchResources If true scratch resources will be purged prior to other
255 * resource types.
256 */
257 void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);
258
259 /**
260 * This entry point is intended for instances where an app has been backgrounded or
261 * suspended.
262 * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
263 * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
264 * then all unlocked resources will be purged.
265 * In either case, after the unlocked resources are purged a separate pass will be made to
266 * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
267 * some resources with persistent data may be purged to be under budget).
268 *
269 * @param opts If kScratchResourcesOnly only unlocked scratch resources will be purged prior
270 * enforcing the budget requirements.
271 */
272 void purgeUnlockedResources(GrPurgeResourceOptions opts);
273
274 /**
275 * Gets the maximum supported texture size.
276 */
278
279 /**
280 * Gets the maximum supported render target size.
281 */
283
284 /**
285 * Can a SkImage be created with the given color type.
286 */
288
289 /**
290 * Does this context support protected content?
291 */
293
294 /**
295 * Can a SkSurface be created with the given color type. To check whether MSAA is supported
296 * use maxSurfaceSampleCountForColorType().
297 */
299
300 /**
301 * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
302 * rendering is supported for the color type. 0 is returned if rendering to this color type
303 * is not supported at all.
304 */
306
307 ///////////////////////////////////////////////////////////////////////////
308 // Misc.
309
310 /**
311 * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
312 * executing any more commands on the GPU. We only guarantee blocking transfer and fragment
313 * shader work, but may block earlier stages as well depending on the backend. If this call
314 * returns false, then the GPU back-end will not wait on any passed in semaphores, and the
315 * client will still own the semaphores, regardless of the value of deleteSemaphoresAfterWait.
316 *
317 * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
318 * it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
319 * knows that Skia has finished waiting on them. This can be done by using finishedProcs on
320 * flush calls.
321 *
322 * This is not supported on the GL backend.
323 */
324 bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
325 bool deleteSemaphoresAfterWait = true);
326
327 /**
328 * Call to ensure all drawing to the context has been flushed and submitted to the underlying 3D
329 * API. This is equivalent to calling GrContext::flush with a default GrFlushInfo followed by
330 * GrContext::submit(sync).
331 */
333 this->flush(GrFlushInfo());
334 this->submit(sync);
335 }
336
337 /**
338 * Call to ensure all drawing to the context has been flushed to underlying 3D API specific
339 * objects. A call to `submit` is always required to ensure work is actually sent to
340 * the gpu. Some specific API details:
341 * GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
342 * sync objects from the flush will not be valid until a submission occurs.
343 *
344 * Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
345 * buffer or encoder objects. However, these objects are not sent to the gpu until a
346 * submission occurs.
347 *
348 * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
349 * submitted to the gpu during the next submit call (it is possible Skia failed to create a
350 * subset of the semaphores). The client should not wait on these semaphores until after submit
351 * has been called, and must keep them alive until then. If this call returns
352 * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
353 * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in with
354 * the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
355 * client is still responsible for deleting any initialized semaphores.
356 * Regardless of semaphore submission the context will still be flushed. It should be
357 * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
358 * happen. It simply means there were no semaphores submitted to the GPU. A caller should only
359 * take this as a failure if they passed in semaphores to be submitted.
360 */
362
363 void flush() { this->flush(GrFlushInfo()); }
364
365 /** Flushes any pending uses of texture-backed images in the GPU backend. If the image is not
366 * texture-backed (including promise texture images) or if the GrDirectContext does not
367 * have the same context ID as the context backing the image then this is a no-op.
368 * If the image was not used in any non-culled draws in the current queue of work for the
369 * passed GrDirectContext then this is a no-op unless the GrFlushInfo contains semaphores or
370 * a finish proc. Those are respected even when the image has not been used.
371 * @param image the non-null image to flush.
372 * @param info flush options
373 */
375 void flush(const sk_sp<const SkImage>& image);
376
377 /** Version of flush() that uses a default GrFlushInfo. Also submits the flushed work to the
378 * GPU.
379 */
380 void flushAndSubmit(const sk_sp<const SkImage>& image);
381
382 /** Issues pending SkSurface commands to the GPU-backed API objects and resolves any SkSurface
383 * MSAA. A call to GrDirectContext::submit is always required to ensure work is actually sent
384 * to the gpu. Some specific API details:
385 * GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
386 * sync objects from the flush will not be valid until a submission occurs.
387 *
388 * Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
389 * buffer or encoder objects. However, these objects are not sent to the gpu until a
390 * submission occurs.
391 *
392 * The work that is submitted to the GPU will be dependent on the BackendSurfaceAccess that is
393 * passed in.
394 *
395 * If BackendSurfaceAccess::kNoAccess is passed in all commands will be issued to the GPU.
396 *
397 * If BackendSurfaceAccess::kPresent is passed in and the backend API is not Vulkan, it is
398 * treated the same as kNoAccess. If the backend API is Vulkan, the VkImage that backs the
399 * SkSurface will be transferred back to its original queue. If the SkSurface was created by
400 * wrapping a VkImage, the queue will be set to the queue which was originally passed in on
401 * the GrVkImageInfo. Additionally, if the original queue was not external or foreign the
402 * layout of the VkImage will be set to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.
403 *
404 * The GrFlushInfo describes additional options to flush. Please see documentation at
405 * GrFlushInfo for more info.
406 *
407 * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
408 * submitted to the gpu during the next submit call (it is possible Skia failed to create a
409 * subset of the semaphores). The client should not wait on these semaphores until after submit
410 * has been called, but must keep them alive until then. If a submit flag was passed in with
411 * the flush these valid semaphores can be waited on immediately. If this call returns
412 * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
413 * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in
414 * with the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
415 * client is still responsible for deleting any initialized semaphores.
416 * Regardless of semaphore submission the context will still be flushed. It should be
417 * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
418 * happen. It simply means there were no semaphores submitted to the GPU. A caller should only
419 * take this as a failure if they passed in semaphores to be submitted.
420 *
421 * Pending surface commands are flushed regardless of the return result.
422 *
423 * @param surface The GPU backed surface to be flushed. Has no effect on a CPU-backed surface.
424 * @param access type of access the call will do on the backend object after flush
425 * @param info flush options
426 */
429 const GrFlushInfo& info);
430
431 /**
432 * Same as above except:
433 *
434 * If a skgpu::MutableTextureState is passed in, at the end of the flush we will transition
435 * the surface to be in the state requested by the skgpu::MutableTextureState. If the surface
436 * (or SkImage or GrBackendSurface wrapping the same backend object) is used again after this
437 * flush the state may be changed and no longer match what is requested here. This is often
438 * used if the surface will be used for presenting or external use and the client wants backend
439 * object to be prepped for that use. A finishedProc or semaphore on the GrFlushInfo will also
440 * include the work for any requested state change.
441 *
442 * If the backend API is Vulkan, the caller can set the skgpu::MutableTextureState's
443 * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
444 * tell Skia to not change those respective states.
445 *
446 * @param surface The GPU backed surface to be flushed. Has no effect on a CPU-backed surface.
447 * @param info flush options
448 * @param newState optional state change request after flush
449 */
451 const GrFlushInfo& info,
452 const skgpu::MutableTextureState* newState = nullptr);
453
454 /** Call to ensure all reads/writes of the surface have been issued to the underlying 3D API.
455 * Skia will correctly order its own draws and pixel operations. This must be used to ensure
456 * correct ordering when the surface backing store is accessed outside Skia (e.g. direct use of
457 * the 3D API or a windowing system). This is equivalent to
458 * calling ::flush with a default GrFlushInfo followed by ::submit(syncCpu).
459 *
460 * Has no effect on a CPU-backed surface.
461 */
462 void flushAndSubmit(SkSurface* surface, GrSyncCpu sync = GrSyncCpu::kNo);
463
464 /**
465 * Flushes the given surface with the default GrFlushInfo.
466 *
467 * Has no effect on a CPU-backed surface.
468 */
469 void flush(SkSurface* surface);
470
471 /**
472 * Submit outstanding work to the gpu from all previously un-submitted flushes. The return
473 * value of the submit will indicate whether or not the submission to the GPU was successful.
474 *
475 * If the call returns true, all previously passed in semaphores in flush calls will have been
476 * submitted to the GPU and they can safely be waited on. The caller should wait on those
477 * semaphores or perform some other global synchronization before deleting the semaphores.
478 *
479 * If it returns false, then those same semaphores will not have been submitted and we will not
480 * try to submit them again. The caller is free to delete the semaphores at any time.
481 *
482 * If sync flag is GrSyncCpu::kYes, this function will return once the gpu has finished with all
483 * submitted work.
484 */
485 bool submit(GrSyncCpu sync = GrSyncCpu::kNo);
486
487 /**
488 * Checks whether any asynchronous work is complete and if so calls related callbacks.
489 */
490 void checkAsyncWorkCompletion();
491
492 /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
493 // Chrome is using this!
494 void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
495
496 bool supportsDistanceFieldText() const;
497
498 void storeVkPipelineCacheData();
499
500 /**
501 * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
502 * It is guaranteed that this backend format will be the one used by the following
503 * SkColorType and GrSurfaceCharacterization-based createBackendTexture methods.
504 *
505 * The caller should check that the returned format is valid.
506 */
508
509 /**
510 * The explicitly allocated backend texture API allows clients to use Skia to create backend
511 * objects outside of Skia proper (i.e., Skia's caching system will not know about them.)
512 *
513 * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
514 * before deleting the context used to create them. If the backend is Vulkan, the textures must
515 * be deleted before abandoning the context as well. Additionally, clients should only delete
516 * these objects on the thread for which that context is active.
517 *
518 * The client is responsible for ensuring synchronization between different uses
519 * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the
520 * surface, rewrapping it in an image and drawing the image will require explicit
521 * synchronization on the client's part).
522 */
523
524 /**
525 * If possible, create an uninitialized backend texture. The client should ensure that the
526 * returned backend texture is valid.
527 * For the Vulkan backend the layout of the created VkImage will be:
528 * VK_IMAGE_LAYOUT_UNDEFINED.
529 */
530 GrBackendTexture createBackendTexture(int width,
531 int height,
532 const GrBackendFormat&,
536 std::string_view label = {});
537
538 /**
539 * If possible, create an uninitialized backend texture. The client should ensure that the
540 * returned backend texture is valid.
541 * If successful, the created backend texture will be compatible with the provided
542 * SkColorType.
543 * For the Vulkan backend the layout of the created VkImage will be:
544 * VK_IMAGE_LAYOUT_UNDEFINED.
545 */
546 GrBackendTexture createBackendTexture(int width,
547 int height,
552 std::string_view label = {});
553
554 /**
555 * If possible, create a backend texture initialized to a particular color. The client should
556 * ensure that the returned backend texture is valid. The client can pass in a finishedProc
557 * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
558 * client is required to call `submit` to send the upload work to the gpu. The
559 * finishedProc will always get called even if we failed to create the GrBackendTexture.
560 * For the Vulkan backend the layout of the created VkImage will be:
561 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
562 */
563 GrBackendTexture createBackendTexture(int width,
564 int height,
565 const GrBackendFormat&,
566 const SkColor4f& color,
570 GrGpuFinishedProc finishedProc = nullptr,
571 GrGpuFinishedContext finishedContext = nullptr,
572 std::string_view label = {});
573
574 /**
575 * If possible, create a backend texture initialized to a particular color. The client should
576 * ensure that the returned backend texture is valid. The client can pass in a finishedProc
577 * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
578 * client is required to call `submit` to send the upload work to the gpu. The
579 * finishedProc will always get called even if we failed to create the GrBackendTexture.
580 * If successful, the created backend texture will be compatible with the provided
581 * SkColorType.
582 * For the Vulkan backend the layout of the created VkImage will be:
583 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
584 */
585 GrBackendTexture createBackendTexture(int width,
586 int height,
588 const SkColor4f& color,
592 GrGpuFinishedProc finishedProc = nullptr,
593 GrGpuFinishedContext finishedContext = nullptr,
594 std::string_view label = {});
595
596 /**
597 * If possible, create a backend texture initialized with the provided pixmap data. The client
598 * should ensure that the returned backend texture is valid. The client can pass in a
599 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
600 * deleted. The client is required to call `submit` to send the upload work to the gpu.
601 * The finishedProc will always get called even if we failed to create the GrBackendTexture.
602 * If successful, the created backend texture will be compatible with the provided
603 * pixmap(s). Compatible, in this case, means that the backend format will be the result
604 * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted
605 * when this call returns.
606 * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
607 * the data for all the mipmap levels must be provided. In the mipmapped case all the
608 * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
609 * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). The
610 * GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the texture.
611 * Note: the pixmap's alphatypes and colorspaces are ignored.
612 * For the Vulkan backend the layout of the created VkImage will be:
613 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
614 */
615 GrBackendTexture createBackendTexture(const SkPixmap srcData[],
616 int numLevels,
620 GrGpuFinishedProc finishedProc = nullptr,
621 GrGpuFinishedContext finishedContext = nullptr,
622 std::string_view label = {});
623
624 /**
625 * Convenience version createBackendTexture() that takes just a base level pixmap.
626 */
627 GrBackendTexture createBackendTexture(const SkPixmap& srcData,
628 GrSurfaceOrigin textureOrigin,
629 GrRenderable renderable,
630 GrProtected isProtected,
631 GrGpuFinishedProc finishedProc = nullptr,
632 GrGpuFinishedContext finishedContext = nullptr,
633 std::string_view label = {});
634
635 // Deprecated versions that do not take origin and assume top-left.
636 GrBackendTexture createBackendTexture(const SkPixmap srcData[],
637 int numLevels,
638 GrRenderable renderable,
639 GrProtected isProtected,
640 GrGpuFinishedProc finishedProc = nullptr,
641 GrGpuFinishedContext finishedContext = nullptr,
642 std::string_view label = {});
643
644 GrBackendTexture createBackendTexture(const SkPixmap& srcData,
645 GrRenderable renderable,
646 GrProtected isProtected,
647 GrGpuFinishedProc finishedProc = nullptr,
648 GrGpuFinishedContext finishedContext = nullptr,
649 std::string_view label = {});
650
651 /**
652 * If possible, updates a backend texture to be filled to a particular color. The client should
653 * check the return value to see if the update was successful. The client can pass in a
654 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
655 * deleted. The client is required to call `submit` to send the upload work to the gpu.
656 * The finishedProc will always get called even if we failed to update the GrBackendTexture.
657 * For the Vulkan backend after a successful update the layout of the created VkImage will be:
658 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
659 */
660 bool updateBackendTexture(const GrBackendTexture&,
661 const SkColor4f& color,
662 GrGpuFinishedProc finishedProc,
663 GrGpuFinishedContext finishedContext);
664
665 /**
666 * If possible, updates a backend texture to be filled to a particular color. The data in
667 * GrBackendTexture and passed in color is interpreted with respect to the passed in
668 * SkColorType. The client should check the return value to see if the update was successful.
669 * The client can pass in a finishedProc to be notified when the data has been uploaded by the
670 * gpu and the texture can be deleted. The client is required to call `submit` to send
671 * the upload work to the gpu. The finishedProc will always get called even if we failed to
672 * update the GrBackendTexture.
673 * For the Vulkan backend after a successful update the layout of the created VkImage will be:
674 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
675 */
676 bool updateBackendTexture(const GrBackendTexture&,
677 SkColorType skColorType,
678 const SkColor4f& color,
679 GrGpuFinishedProc finishedProc,
680 GrGpuFinishedContext finishedContext);
681
682 /**
683 * If possible, updates a backend texture filled with the provided pixmap data. The client
684 * should check the return value to see if the update was successful. The client can pass in a
685 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
686 * deleted. The client is required to call `submit` to send the upload work to the gpu.
687 * The finishedProc will always get called even if we failed to create the GrBackendTexture.
688 * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case,
689 * means that the backend format is compatible with the base pixmap's colortype. The src data
690 * can be deleted when this call returns.
691 * If the backend texture is mip mapped, the data for all the mipmap levels must be provided.
692 * In the mipmapped case all the colortypes of the provided pixmaps must be the same.
693 * Additionally, all the miplevels must be sized correctly (please see
694 * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether the
695 * pixmap data is vertically flipped in the texture.
696 * Note: the pixmap's alphatypes and colorspaces are ignored.
697 * For the Vulkan backend after a successful update the layout of the created VkImage will be:
698 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
699 */
700 bool updateBackendTexture(const GrBackendTexture&,
701 const SkPixmap srcData[],
702 int numLevels,
704 GrGpuFinishedProc finishedProc = nullptr,
705 GrGpuFinishedContext finishedContext = nullptr);
706
707 /**
708 * Convenience version of updateBackendTexture that takes just a base level pixmap.
709 */
711 const SkPixmap& srcData,
713 GrGpuFinishedProc finishedProc = nullptr,
714 GrGpuFinishedContext finishedContext = nullptr) {
715 return this->updateBackendTexture(texture,
716 &srcData,
717 1,
718 textureOrigin,
719 finishedProc,
720 finishedContext);
721 }
722
723 // Deprecated version that does not take origin and assumes top-left.
724 bool updateBackendTexture(const GrBackendTexture& texture,
725 const SkPixmap srcData[],
726 int numLevels,
727 GrGpuFinishedProc finishedProc,
728 GrGpuFinishedContext finishedContext);
729
730 /**
731 * Retrieve the GrBackendFormat for a given SkTextureCompressionType. This is
732 * guaranteed to match the backend format used by the following
733 * createCompressedBackendTexture methods that take a CompressionType.
734 *
735 * The caller should check that the returned format is valid.
736 */
738
739 /**
740 * If possible, create a compressed backend texture initialized to a particular color. The
741 * client should ensure that the returned backend texture is valid. The client can pass in a
742 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
743 * deleted. The client is required to call `submit` to send the upload work to the gpu.
744 * The finishedProc will always get called even if we failed to create the GrBackendTexture.
745 * For the Vulkan backend the layout of the created VkImage will be:
746 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
747 */
748 GrBackendTexture createCompressedBackendTexture(int width,
749 int height,
750 const GrBackendFormat&,
751 const SkColor4f& color,
754 GrGpuFinishedProc finishedProc = nullptr,
755 GrGpuFinishedContext finishedContext = nullptr);
756
757 GrBackendTexture createCompressedBackendTexture(int width,
758 int height,
760 const SkColor4f& color,
763 GrGpuFinishedProc finishedProc = nullptr,
764 GrGpuFinishedContext finishedContext = nullptr);
765
766 /**
767 * If possible, create a backend texture initialized with the provided raw data. The client
768 * should ensure that the returned backend texture is valid. The client can pass in a
769 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
770 * deleted. The client is required to call `submit` to send the upload work to the gpu.
771 * The finishedProc will always get called even if we failed to create the GrBackendTexture
772 * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
773 * the data for all the mipmap levels must be provided. Additionally, all the miplevels
774 * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
775 * For the Vulkan backend the layout of the created VkImage will be:
776 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
777 */
778 GrBackendTexture createCompressedBackendTexture(int width,
779 int height,
780 const GrBackendFormat&,
781 const void* data,
782 size_t dataSize,
785 GrGpuFinishedProc finishedProc = nullptr,
786 GrGpuFinishedContext finishedContext = nullptr);
787
788 GrBackendTexture createCompressedBackendTexture(int width,
789 int height,
791 const void* data,
792 size_t dataSize,
795 GrGpuFinishedProc finishedProc = nullptr,
796 GrGpuFinishedContext finishedContext = nullptr);
797
798 /**
799 * If possible, updates a backend texture filled with the provided color. If the texture is
800 * mipmapped, all levels of the mip chain will be updated to have the supplied color. The client
801 * should check the return value to see if the update was successful. The client can pass in a
802 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
803 * deleted. The client is required to call `submit` to send the upload work to the gpu.
804 * The finishedProc will always get called even if we failed to create the GrBackendTexture.
805 * For the Vulkan backend after a successful update the layout of the created VkImage will be:
806 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
807 */
808 bool updateCompressedBackendTexture(const GrBackendTexture&,
809 const SkColor4f& color,
810 GrGpuFinishedProc finishedProc,
811 GrGpuFinishedContext finishedContext);
812
813 /**
814 * If possible, updates a backend texture filled with the provided raw data. The client
815 * should check the return value to see if the update was successful. The client can pass in a
816 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
817 * deleted. The client is required to call `submit` to send the upload work to the gpu.
818 * The finishedProc will always get called even if we failed to create the GrBackendTexture.
819 * If a mipmapped texture is passed in, the data for all the mipmap levels must be provided.
820 * Additionally, all the miplevels must be sized correctly (please see
821 * SkMipmap::ComputeLevelSize and ComputeLevelCount).
822 * For the Vulkan backend after a successful update the layout of the created VkImage will be:
823 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
824 */
825 bool updateCompressedBackendTexture(const GrBackendTexture&,
826 const void* data,
827 size_t dataSize,
828 GrGpuFinishedProc finishedProc,
829 GrGpuFinishedContext finishedContext);
830
831 /**
832 * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
833 * skgpu::MutableTextureState. All objects that wrap the backend surface (i.e. SkSurfaces and
834 * SkImages) will also be aware of this state change. This call does not submit the state change
835 * to the gpu, but requires the client to call `submit` to send it to the GPU. The work
836 * for this call is ordered linearly with all other calls that require GrContext::submit to be
837 * called (e.g updateBackendTexture and flush). If finishedProc is not null then it will be
838 * called with finishedContext after the state transition is known to have occurred on the GPU.
839 *
840 * See skgpu::MutableTextureState to see what state can be set via this call.
841 *
842 * If the backend API is Vulkan, the caller can set the skgpu::MutableTextureState's
843 * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
844 * tell Skia to not change those respective states.
845 *
846 * If previousState is not null and this returns true, then Skia will have filled in
847 * previousState to have the values of the state before this call.
848 */
849 bool setBackendTextureState(const GrBackendTexture&,
851 skgpu::MutableTextureState* previousState = nullptr,
852 GrGpuFinishedProc finishedProc = nullptr,
853 GrGpuFinishedContext finishedContext = nullptr);
854 bool setBackendRenderTargetState(const GrBackendRenderTarget&,
856 skgpu::MutableTextureState* previousState = nullptr,
857 GrGpuFinishedProc finishedProc = nullptr,
858 GrGpuFinishedContext finishedContext = nullptr);
859
860 void deleteBackendTexture(const GrBackendTexture&);
861
862 // This interface allows clients to pre-compile shaders and populate the runtime program cache.
863 // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format.
864 //
865 // Steps to use this API:
866 //
867 // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
868 // something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL. This
869 // will ensure that the blobs are SkSL, and are suitable for pre-compilation.
870 // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
871 //
872 // 3) Switch over to shipping your application. Include the key/data pairs from above.
873 // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
874 // This will compile the SkSL to create a GL program, and populate the runtime cache.
875 //
876 // This is only guaranteed to work if the context/device used in step #2 are created in the
877 // same way as the one used in step #4, and the same GrContextOptions are specified.
878 // Using cached shader blobs on a different device or driver is undefined.
879 bool precompileShader(const SkData& key, const SkData& data);
880
881#ifdef SK_ENABLE_DUMP_GPU
882 /** Returns a string with detailed information about the context & GPU, in JSON format. */
883 SkString dump() const;
884#endif
885
887 public:
889
891
892 bool operator==(const DirectContextID& that) const { return fID == that.fID; }
893 bool operator!=(const DirectContextID& that) const { return !(*this == that); }
894
896 bool isValid() const { return fID != SK_InvalidUniqueID; }
897
898 private:
899 constexpr DirectContextID(uint32_t id) : fID(id) {}
900 uint32_t fID;
901 };
902
 // Returns this context's DirectContextID (the fDirectContextID member fixed at construction).
 903 DirectContextID directContextID() const { return fDirectContextID; }
904
905 // Provides access to functions that aren't part of the public API.
907 const GrDirectContextPriv priv() const; // NOLINT(readability-const-return-type)
908
909protected:
913
914 bool init() override;
915
 // Non-owning accessor for the atlas manager held in fAtlasManager; caller must not delete it.
 916 GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
917#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
918 skgpu::ganesh::SmallPathAtlasMgr* onGetSmallPathAtlasMgr();
919#endif
920
 // Overrides the base-class query to return this object, identifying it as a GrDirectContext.
 921 GrDirectContext* asDirectContext() override { return this; }
922
923private:
924 // This call will make sure our work on the GPU is finished and will execute any outstanding
925 // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the
926 // outstanding work on the gpu. The main use currently for this function is when tearing down or
927 // abandoning the context.
928 //
929 // When we finish up work on the GPU it could trigger callbacks to the client. In the case we
930 // are abandoning the context we don't want the client to be able to use the GrDirectContext to
931 // issue more commands during the callback. Thus before calling this function we set the
932 // GrDirectContext's state to be abandoned. However, we need to be able to get by the abandoned
933 // check in the call to know that it is safe to execute this. The shouldExecuteWhileAbandoned
934 // bool is used for this signal.
935 void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned);
936
937 // This delete callback needs to be the first thing on the GrDirectContext so that it is the
938 // last thing destroyed. The callback may signal the client to clean up things that may need
939 // to survive the lifetime of some of the other objects on the GrDirectContext. So make sure
940 // we don't call it until all else has been destroyed.
941 class DeleteCallbackHelper {
942 public:
943 DeleteCallbackHelper(GrDirectContextDestroyedContext context,
945 : fContext(context), fProc(proc) {}
946
947 ~DeleteCallbackHelper() {
948 if (fProc) {
950 }
951 }
952
953 private:
956 };
957 std::unique_ptr<DeleteCallbackHelper> fDeleteCallbackHelper;
958
959 const DirectContextID fDirectContextID;
960 // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
961 // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
962 // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
963 // invoked after objects they depend upon have already been destroyed.
964 std::unique_ptr<SkTaskGroup> fTaskGroup;
965 std::unique_ptr<sktext::gpu::StrikeCache> fStrikeCache;
966 std::unique_ptr<GrGpu> fGpu;
967 std::unique_ptr<GrResourceCache> fResourceCache;
968 std::unique_ptr<GrResourceProvider> fResourceProvider;
969
970 // This is incremented before we start calling ReleaseProcs from GrSurfaces and decremented
971 // after. A ReleaseProc may trigger code causing another resource to get freed so we need to track
972 // the count to know if we are in a ReleaseProc at any level. When this is set to a value greater
973 // than zero we will not allow abandonContext calls to be made on the context.
974 int fInsideReleaseProcCnt = 0;
975
976 bool fDidTestPMConversions;
977 // true if the PM/UPM conversion succeeded; false otherwise
978 bool fPMUPMConversionsRoundTrip;
979
980 GrContextOptions::PersistentCache* fPersistentCache;
981
982 std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
983 std::unique_ptr<GrAtlasManager> fAtlasManager;
984
985#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
986 std::unique_ptr<skgpu::ganesh::SmallPathAtlasMgr> fSmallPathAtlasMgr;
987#endif
988
990};
991
992
993#endif
const char * options
const char * backend
static void info(const char *fmt,...) SK_PRINTF_LIKE(1
Definition: DM.cpp:213
GrPurgeResourceOptions
Definition: GrTypes.h:234
GrSurfaceOrigin
Definition: GrTypes.h:147
@ kTopLeft_GrSurfaceOrigin
Definition: GrTypes.h:148
static const uint32_t kAll_GrBackendState
Definition: GrTypes.h:176
GrBackendApi
Definition: GrTypes.h:95
void * GrDirectContextDestroyedContext
Definition: GrTypes.h:184
GrSemaphoresSubmitted
Definition: GrTypes.h:229
void * GrGpuFinishedContext
Definition: GrTypes.h:178
void(* GrDirectContextDestroyedProc)(GrDirectContextDestroyedContext destroyedContext)
Definition: GrTypes.h:185
void(* GrGpuFinishedProc)(GrGpuFinishedContext finishedContext)
Definition: GrTypes.h:179
GrSyncCpu
Definition: GrTypes.h:239
IsFiniteProc fProc
Definition: MathBench.cpp:219
#define SK_API
Definition: SkAPI.h:35
SkColorType
Definition: SkColorType.h:19
const Context & fContext
SkTextureCompressionType
static constexpr uint32_t SK_InvalidUniqueID
Definition: SkTypes.h:196
static void dump(const float m[20], SkYUVColorSpace cs, bool rgb2yuv)
Definition: SkYUVMath.cpp:629
sk_sp< GrContextThreadSafeProxy > threadSafeProxy()
SK_API GrBackendFormat defaultBackendFormat(SkColorType, GrRenderable) const
SK_API GrBackendFormat compressedBackendFormat(SkTextureCompressionType) const
bool operator!=(const DirectContextID &that) const
bool operator==(const DirectContextID &that) const
void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed)
GrDirectContext * asDirectContext() override
void flushAndSubmit(GrSyncCpu sync=GrSyncCpu::kNo)
bool updateBackendTexture(const GrBackendTexture &texture, const SkPixmap &srcData, GrSurfaceOrigin textureOrigin=kTopLeft_GrSurfaceOrigin, GrGpuFinishedProc finishedProc=nullptr, GrGpuFinishedContext finishedContext=nullptr)
DirectContextID directContextID() const
GrAtlasManager * onGetAtlasManager()
Definition: GrGpu.h:62
GrRecordingContextPriv priv()
bool abandoned() override
SK_API bool colorTypeSupportedAsSurface(SkColorType colorType) const
SK_API bool colorTypeSupportedAsImage(SkColorType) const
SK_API int maxSurfaceSampleCountForColorType(SkColorType colorType) const
SK_API int maxTextureSize() const
SK_API bool supportsProtectedContent() const
SK_API int maxRenderTargetSize() const
void abandonContext() override
Definition: SkData.h:25
DlColor color
VkSurfaceKHR surface
Definition: main.cc:49
AtkStateType state
FlTexture * texture
sk_sp< const SkImage > image
Definition: SkRecords.h:269
BackendSurfaceAccess
Definition: SkSurface.h:44
Definition: GpuTools.h:21
Renderable
Definition: GpuTypes.h:69
Mipmapped
Definition: GpuTypes.h:53
Protected
Definition: GpuTypes.h:61
int32_t height
int32_t width
std::shared_ptr< const fml::Mapping > data
Definition: texture_gles.cc:63
const uintptr_t id