#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
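// For illustration (hypothetical call site): the macro routes calls through the GPU's
// skgpu::VulkanInterface, so the line below dispatches vkDestroyBuffer:
//   VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));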
GrVkBuffer::GrVkBuffer(GrVkGpu* gpu,
                       size_t sizeInBytes,
                       GrGpuBufferType bufferType,
                       GrAccessPattern accessPattern,
                       VkBuffer buffer,
                       const skgpu::VulkanAlloc& alloc,
                       const GrVkDescriptorSet* uniformDescriptorSet,
                       std::string_view label)
        : GrGpuBuffer(gpu, sizeInBytes, bufferType, accessPattern, label)
        , fBuffer(buffer)
        , fAlloc(alloc)
        , fUniformDescriptorSet(uniformDescriptorSet) {
    // ...
}
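// For context (a sketch, not verbatim): Make() invokes this constructor only after the
// VkBuffer, its backing allocation, and any uniform descriptor set have been created:
//   return sk_sp<GrVkBuffer>(new GrVkBuffer(gpu, size, bufferType, accessPattern,
//                                           buffer, alloc, uniformDescSet,
//                                           /*label=*/"MakeVkBuffer"));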
static const GrVkDescriptorSet* make_uniform_desc_set(GrVkGpu* gpu, VkBuffer buffer, size_t size) {
    const GrVkDescriptorSet* descriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
    // ... (fill out bufferInfo and the remaining descriptorWrite fields; see the sketch below)
    descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptorWrite.pNext = nullptr;
    descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptorWrite.pBufferInfo = &bufferInfo;
    GR_VK_CALL(gpu->vkInterface(),
               UpdateDescriptorSets(gpu->device(), 1, &descriptorWrite, 0, nullptr));
    return descriptorSet;
}
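// A minimal sketch of the VkDescriptorBufferInfo that descriptorWrite.pBufferInfo points
// at above (field values assumed, not verbatim): the whole buffer, bound at offset zero.
//   VkDescriptorBufferInfo bufferInfo;
//   memset(&bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
//   bufferInfo.buffer = buffer;   // the VkBuffer passed to make_uniform_desc_set()
//   bufferInfo.offset = 0;
//   bufferInfo.range  = size;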
    // In GrVkBuffer::Make(): pick an allocator usage for the requested buffer type.
    using BufferUsage = skgpu::VulkanMemoryAllocator::BufferUsage;
    BufferUsage allocUsage;
    switch (bufferType) {
        case GrGpuBufferType::kVertex:
        case GrGpuBufferType::kIndex:
        case GrGpuBufferType::kDrawIndirect:
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads
                                          : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kUniform:      allocUsage = BufferUsage::kCpuWritesGpuReads;     break;
        case GrGpuBufferType::kXferCpuToGpu: allocUsage = BufferUsage::kTransfersFromCpuToGpu; break;
        case GrGpuBufferType::kXferGpuToCpu: allocUsage = BufferUsage::kTransfersFromGpuToCpu; break;
    }

    // GPU-only buffers are filled via transfers, so they also need the transfer-destination bit.
    if (!requiresMappable) {
        usageFlags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }
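    // For context, a sketch of how requiresMappable is derived earlier in Make()
    // (reconstructed from the capability queries this file relies on; not verbatim):
    //   bool requiresMappable = gpu->protectedContext() ||
    //                           accessPattern == kDynamic_GrAccessPattern ||
    //                           accessPattern == kStream_GrAccessPattern ||
    //                           !gpu->vkCaps().gpuOnlyBuffersMorePerformant();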
    bool shouldPersistentlyMapCpuToGpu = gpu->vkCaps().shouldPersistentlyMapCpuToGpuBuffers();
    auto checkResult = [gpu, allocUsage, shouldPersistentlyMapCpuToGpu](VkResult result) {
        GR_VK_LOG_IF_NOT_SUCCESS(gpu, result,
                                 "skgpu::VulkanMemory::AllocBufferMemory "
                                 "(allocUsage:%d, shouldPersistentlyMapCpuToGpu:%d)",
                                 (int)allocUsage, (int)shouldPersistentlyMapCpuToGpu);
        return gpu->checkVkResult(result);
    };
    skgpu::VulkanAlloc alloc;
    if (!skgpu::VulkanMemory::AllocBufferMemory(gpu->memoryAllocator(), buffer, allocUsage,
                                                shouldPersistentlyMapCpuToGpu,
                                                checkResult, &alloc)) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    // For uniform buffers, also acquire a descriptor set so the buffer can be bound in shaders.
    uniformDescSet = make_uniform_desc_set(gpu, buffer, size);
    if (!uniformDescSet) {
        // ... (destroy the buffer, free its memory, and return nullptr)
    }
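    // Usage sketch (hypothetical caller): creating a 4 KiB dynamic vertex buffer.
    //   sk_sp<GrVkBuffer> vbo = GrVkBuffer::Make(gpu, /*size=*/4096,
    //                                            GrGpuBufferType::kVertex,
    //                                            kDynamic_GrAccessPattern);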
void GrVkBuffer::vkMap(size_t readOffset, size_t readSize) {
    SkASSERT(!fMapPtr);
    if (this->isVkMappable()) {
        GrVkGpu* gpu = this->getVkGpu();
        auto allocator = gpu->memoryAllocator();
        auto checkResult_mapAlloc = [gpu](VkResult result) {
            GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::MapAlloc");
            return gpu->checkVkResult(result);
        };
        fMapPtr = skgpu::VulkanMemory::MapAlloc(allocator, fAlloc, checkResult_mapAlloc);
        // On non-coherent memory, a range the CPU will read must be invalidated first.
        if (fMapPtr && readSize != 0) {
            auto checkResult_invalidateMapAlloc = [gpu, readOffset, readSize](VkResult result) {
                GR_VK_LOG_IF_NOT_SUCCESS(gpu, result,
                                         "skgpu::VulkanMemory::InvalidateMappedAlloc "
                                         "(readOffset:%zu, readSize:%zu)",
                                         readOffset, readSize);
                return gpu->checkVkResult(result);
            };
            skgpu::VulkanMemory::InvalidateMappedAlloc(allocator, fAlloc, readOffset, readSize,
                                                       checkResult_invalidateMapAlloc);
        }
    }
}
void GrVkBuffer::vkUnmap(size_t flushOffset, size_t flushSize) {
    SkASSERT(fMapPtr && this->isVkMappable());
    GrVkGpu* gpu = this->getVkGpu();
    auto checkResult = [gpu, flushOffset, flushSize](VkResult result) {
        GR_VK_LOG_IF_NOT_SUCCESS(gpu, result,
                                 "skgpu::VulkanMemory::FlushMappedAlloc "
                                 "(flushOffset:%zu, flushSize:%zu)",
                                 flushOffset, flushSize);
        return gpu->checkVkResult(result);
    };
    auto allocator = gpu->memoryAllocator();
    skgpu::VulkanMemory::FlushMappedAlloc(allocator, fAlloc, flushOffset, flushSize, checkResult);
    skgpu::VulkanMemory::UnmapAlloc(allocator, fAlloc);
}
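// Note on the flush/invalidate pairing: Vulkan only guarantees coherence automatically on
// HOST_COHERENT memory. For non-coherent allocations, FlushMappedAlloc (here) publishes
// CPU writes to the GPU, while InvalidateMappedAlloc (in vkMap above) pulls GPU writes
// into CPU visibility before they are read.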
void GrVkBuffer::copyCpuDataToGpuBuffer(const void* src, size_t offset, size_t size) {
    SkASSERT(src);
    GrVkGpu* gpu = this->getVkGpu();
    // vkCmdUpdateBuffer is limited to updates of at most 65536 bytes whose size and
    // offset are both 4-byte aligned; anything else is staged through a transfer buffer.
    if (size <= 65536 && SkIsAlign4(size) && SkIsAlign4(offset) &&
        !gpu->vkCaps().avoidUpdateBuffers()) {
        gpu->updateBuffer(sk_ref_sp(this), src, offset, size);
        return;
    }
    sk_sp<GrGpuBuffer> transferBuffer = gpu->getContext()->priv().resourceProvider()->createBuffer(
            src, size, GrGpuBufferType::kXferCpuToGpu, kDynamic_GrAccessPattern);
    if (!transferBuffer) {
        return;
    }
    gpu->transferFromBufferToBuffer(std::move(transferBuffer), /*srcOffset=*/0,
                                    sk_ref_sp(this), offset, size);
}
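// Worked example of the inline-update gate above (hypothetical values):
//   size = 49152, offset = 256   -> 49152 <= 65536, both 4-byte aligned -> vkCmdUpdateBuffer
//   size = 1 << 20               -> exceeds 65536                       -> staging buffer path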
void GrVkBuffer::addMemoryBarrier(VkAccessFlags srcAccessMask,
                                  VkAccessFlags dstAccessMask,
                                  VkPipelineStageFlags srcStageMask,
                                  VkPipelineStageFlags dstStageMask,
                                  bool byRegion) const {
    VkBufferMemoryBarrier bufferMemoryBarrier;
    // ... (fill out the barrier; see the sketch below)
    this->getVkGpu()->addBufferMemoryBarrier(this, srcStageMask, dstStageMask, byRegion,
                                             &bufferMemoryBarrier);
}
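// A sketch of the barrier initialization elided above (field values reconstructed from
// the enums this file references; treat as an approximation, not verbatim):
//   VkBufferMemoryBarrier bufferMemoryBarrier = {
//           VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
//           nullptr,                                  // pNext
//           srcAccessMask,                            // srcAccessMask
//           dstAccessMask,                            // dstAccessMask
//           VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
//           VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
//           fBuffer,                                  // buffer
//           0,                                        // offset
//           this->size(),                             // size
//   };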
void GrVkBuffer::vkRelease() {
    if (this->wasDestroyed()) {
        return;
    }
    if (fMapPtr) {
        this->vkUnmap(0, this->size());
        fMapPtr = nullptr;
    }
    if (fUniformDescriptorSet) {
        fUniformDescriptorSet->recycle();
        fUniformDescriptorSet = nullptr;
    }
    VK_CALL(this->getVkGpu(), DestroyBuffer(this->getVkGpu()->device(), fBuffer, nullptr));
    fBuffer = VK_NULL_HANDLE;
    skgpu::VulkanMemory::FreeBufferMemory(this->getVkGpu()->memoryAllocator(), fAlloc);
    fAlloc.fMemory = VK_NULL_HANDLE;
    fAlloc.fBackendMemory = 0;
}
void GrVkBuffer::onRelease() {
    this->vkRelease();
    this->GrGpuBuffer::onRelease();
}

void GrVkBuffer::onAbandon() {
    this->vkRelease();
    this->GrGpuBuffer::onAbandon();
}

void GrVkBuffer::onMap(MapType type) {
    this->vkMap(0, type == MapType::kRead ? this->size() : 0);
}

void GrVkBuffer::onUnmap(MapType type) {
    this->vkUnmap(0, type == MapType::kWriteDiscard ? this->size() : 0);
}
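// MapType semantics (as used above): a kRead map invalidates the full buffer so the CPU
// sees the GPU's writes, while a kWriteDiscard unmap flushes the full buffer so the GPU
// sees the CPU's writes; the other combinations pass an empty range and skip that work.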
bool GrVkBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool /*preserve*/) {
    if (this->isVkMappable()) {
        // We won't be reading the mapped memory, so pass an empty read range.
        this->vkMap(0, 0);
        if (!fMapPtr) {
            return false;
        }
        memcpy(static_cast<char*>(fMapPtr) + offset, src, size);
        // Only the updated portion needs to be flushed.
        this->vkUnmap(offset, size);
    } else {
        this->copyCpuDataToGpuBuffer(src, offset, size);
    }
    return true;
}
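// Usage sketch (hypothetical caller, via the public GrGpuBuffer API):
//   uint16_t indices[3] = {0, 1, 2};
//   buffer->updateData(indices, /*offset=*/0, sizeof(indices), /*preserve=*/false);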
GrVkGpu* GrVkBuffer::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}