#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
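// Aside: a minimal sketch of the dispatch pattern behind GR_VK_CALL, using toy stand-ins
// (ToyFunctions/ToyInterface are hypothetical, not Skia types). The idea is that every
// Vulkan entry point is reached through a loaded function table on the interface object
// rather than through the static vkXxx symbols:
#include <vulkan/vulkan.h>
struct ToyFunctions { PFN_vkDestroyBuffer fDestroyBuffer; };  // hypothetical stand-in
struct ToyInterface { ToyFunctions fFunctions; };             // for skgpu::VulkanInterface
#define TOY_VK_CALL(IFACE, X) (IFACE)->fFunctions.f##X
// e.g. TOY_VK_CALL(iface, DestroyBuffer(device, buffer, nullptr)) invokes the table's
// fDestroyBuffer pointer with those arguments.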
GrVkBuffer::GrVkBuffer(GrVkGpu* gpu,
                       size_t sizeInBytes,
                       GrGpuBufferType bufferType,
                       GrAccessPattern accessPattern,
                       VkBuffer buffer,
                       const skgpu::VulkanAlloc& alloc,
                       const GrVkDescriptorSet* uniformDescriptorSet,
                       std::string_view label)
        : GrGpuBuffer(gpu, sizeInBytes, bufferType, accessPattern, label)
        , fBuffer(buffer)
        , fAlloc(alloc)
        , fUniformDescriptorSet(uniformDescriptorSet) {}
static const GrVkDescriptorSet* make_uniform_desc_set(GrVkGpu* gpu, VkBuffer buffer, size_t size) {
    const GrVkDescriptorSet* descriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
    if (!descriptorSet) {
        return nullptr;
    }
    VkDescriptorBufferInfo bufferInfo = {};
    bufferInfo.buffer = buffer;
    bufferInfo.offset = 0;
    bufferInfo.range = size;

    VkWriteDescriptorSet descriptorWrite = {};
    descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptorWrite.pNext = nullptr;
    descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptorWrite.pBufferInfo = &bufferInfo;
    // dstSet, dstBinding, dstArrayElement, and descriptorCount are also filled in here.
    GR_VK_CALL(gpu->vkInterface(),
               UpdateDescriptorSets(gpu->device(), 1, &descriptorWrite, 0, nullptr));
    return descriptorSet;
}
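// For reference, a self-contained raw-Vulkan sketch of the same descriptor update with every
// field spelled out (write_uniform_buffer_descriptor and its parameters are illustrative,
// not part of this file):
#include <vulkan/vulkan.h>
static void write_uniform_buffer_descriptor(VkDevice device, VkDescriptorSet set,
                                            uint32_t binding, VkBuffer buffer,
                                            VkDeviceSize range) {
    VkDescriptorBufferInfo bufferInfo = {buffer, /*offset=*/0, range};
    VkWriteDescriptorSet write = {};
    write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    write.dstSet = set;                 // the set being updated
    write.dstBinding = binding;         // uniform-buffer binding index in the set layout
    write.dstArrayElement = 0;
    write.descriptorCount = 1;
    write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    write.pBufferInfo = &bufferInfo;    // pImageInfo/pTexelBufferView stay null
    vkUpdateDescriptorSets(device, 1, &write, 0, nullptr);
}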
    using BufferUsage = skgpu::VulkanMemoryAllocator::BufferUsage;
    BufferUsage allocUsage;
    // Each case also sets the matching usage bit (VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, etc.)
    // on the VkBufferCreateInfo (bufInfo) passed to vkCreateBuffer.
    switch (bufferType) {
        case GrGpuBufferType::kVertex:
        case GrGpuBufferType::kIndex:
        case GrGpuBufferType::kDrawIndirect:
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kUniform:      allocUsage = BufferUsage::kCpuWritesGpuReads;     break;
        case GrGpuBufferType::kXferCpuToGpu: allocUsage = BufferUsage::kTransfersFromCpuToGpu; break;
        case GrGpuBufferType::kXferGpuToCpu: allocUsage = BufferUsage::kTransfersFromGpuToCpu; break;
    }
    // A non-mappable buffer may still need CPU-side updates via a copy, so it is also
    // created with VK_BUFFER_USAGE_TRANSFER_DST_BIT.
    if (!requiresMappable) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }
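// A raw-Vulkan sketch of the creation step this logic feeds (create_vertex_buffer is
// illustrative; the real code handles every GrGpuBufferType, not just vertex buffers):
#include <vulkan/vulkan.h>
static VkBuffer create_vertex_buffer(VkDevice device, VkDeviceSize size, bool requiresMappable) {
    VkBufferCreateInfo info = {};
    info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    info.size = size;
    info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
    if (!requiresMappable) {
        info.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;  // leave the copy-based update path open
    }
    info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;        // used by a single queue family
    VkBuffer buffer = VK_NULL_HANDLE;
    if (vkCreateBuffer(device, &info, nullptr, &buffer) != VK_SUCCESS) {
        return VK_NULL_HANDLE;
    }
    return buffer;
}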
    bool shouldPersistentlyMapCpuToGpu = gpu->vkCaps().shouldPersistentlyMapCpuToGpuBuffers();
    auto checkResult = [gpu, allocUsage, shouldPersistentlyMapCpuToGpu](VkResult result) {
        GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::AllocBufferMemory "
                                 "(allocUsage:%d, shouldPersistentlyMapCpuToGpu:%d)",
                                 (int)allocUsage, (int)shouldPersistentlyMapCpuToGpu);
        return gpu->checkVkResult(result);
    };
    if (!skgpu::VulkanMemory::AllocBufferMemory(gpu->memoryAllocator(), buffer, allocUsage,
                                                shouldPersistentlyMapCpuToGpu, checkResult, &alloc)) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    // Uniform buffers additionally get a descriptor set that points at them.
    const GrVkDescriptorSet* uniformDescSet = nullptr;
    if (bufferType == GrGpuBufferType::kUniform) {
        uniformDescSet = make_uniform_desc_set(gpu, buffer, size);
        if (!uniformDescSet) {
            return nullptr;  // the buffer and its memory are freed before failing
        }
    }
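// Caller-side sketch (assumes an already-initialized GrVkGpu* gpu): Make() returns null if
// buffer creation, memory allocation, binding, or descriptor-set setup fails.
sk_sp<GrVkBuffer> vertexBuffer = GrVkBuffer::Make(gpu, /*size=*/4096,
                                                  GrGpuBufferType::kVertex,
                                                  kStatic_GrAccessPattern);
if (!vertexBuffer) {
    // creation failed; fall back or report
}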
void GrVkBuffer::vkMap(size_t readOffset, size_t readSize) {
    SkASSERT(!fMapPtr);
    if (this->isVkMappable()) {
        SkASSERT(fAlloc.fSize >= readOffset + readSize);

        GrVkGpu* gpu = this->getVkGpu();
        auto checkResult_mapAlloc = [gpu](VkResult result) {
            GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::MapAlloc");
            return gpu->checkVkResult(result);
        };
        fMapPtr = skgpu::VulkanMemory::MapAlloc(gpu->memoryAllocator(), fAlloc,
                                                checkResult_mapAlloc);
        if (fMapPtr && readSize != 0) {
            auto checkResult_invalidateMapAlloc = [gpu, readOffset, readSize](VkResult result) {
                GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::InvalidateMapAlloc "
                                         "(readOffset:%zu, readSize:%zu)",
                                         readOffset, readSize);
                return gpu->checkVkResult(result);
            };
            // "Invalidating" the range makes GPU writes visible to CPU reads through fMapPtr.
            skgpu::VulkanMemory::InvalidateMappedAlloc(gpu->memoryAllocator(), fAlloc, readOffset,
                                                       readSize, checkResult_invalidateMapAlloc);
        }
    }
}
void GrVkBuffer::vkUnmap(size_t flushOffset, size_t flushSize) {
    SkASSERT(fMapPtr && this->isVkMappable());
    SkASSERT(fAlloc.fSize >= flushOffset + flushSize);

    GrVkGpu* gpu = this->getVkGpu();
    auto checkResult = [gpu, flushOffset, flushSize](VkResult result) {
        GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::FlushMappedAlloc "
                                 "(flushOffset:%zu, flushSize:%zu)",
                                 flushOffset, flushSize);
        return gpu->checkVkResult(result);
    };
    // Flushing makes CPU writes to the mapped range visible to the GPU before unmapping.
    auto allocator = gpu->memoryAllocator();
    skgpu::VulkanMemory::FlushMappedAlloc(allocator, fAlloc, flushOffset, flushSize, checkResult);
    skgpu::VulkanMemory::UnmapAlloc(allocator, fAlloc);
}
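// A raw-Vulkan sketch of the whole map/write/flush/unmap round trip the two functions above
// delegate to the allocator (write_through_map is illustrative; real code must also round
// offset/size to VkPhysicalDeviceLimits::nonCoherentAtomSize for non-coherent memory):
#include <cstring>
#include <vulkan/vulkan.h>
static bool write_through_map(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset,
                              const void* src, VkDeviceSize size) {
    void* ptr = nullptr;
    if (vkMapMemory(device, memory, offset, size, 0, &ptr) != VK_SUCCESS) {
        return false;
    }
    memcpy(ptr, src, static_cast<size_t>(size));
    VkMappedMemoryRange range = {};
    range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range.memory = memory;
    range.offset = offset;
    range.size = size;
    vkFlushMappedMemoryRanges(device, 1, &range);  // make the CPU writes visible to the GPU
    vkUnmapMemory(device, memory);
    return true;
}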
void GrVkBuffer::copyCpuDataToGpuBuffer(const void* src, size_t offset, size_t size) {
    SkASSERT(src);
    GrVkGpu* gpu = this->getVkGpu();
    // Small, 4-byte-aligned updates ride along in the command buffer via vkCmdUpdateBuffer;
    // anything else is staged through a kXferCpuToGpu buffer and copied on the GPU.
    if (size <= 65536 && SkIsAlign4(size) && SkIsAlign4(offset) &&
        !gpu->vkCaps().avoidUpdateBuffers()) {
        gpu->updateBuffer(sk_ref_sp(this), src, offset, size);
        return;
    }
    GrResourceProvider* resourceProvider = gpu->getContext()->priv().resourceProvider();
    sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
            size, GrGpuBufferType::kXferCpuToGpu, kDynamic_GrAccessPattern,
            GrResourceProvider::ZeroInit::kNo);
    if (!transferBuffer) {
        return;
    }
    // src is written into the mapped transfer buffer, then copied into this buffer:
    gpu->transferFromBufferToBuffer(std::move(transferBuffer), /*srcOffset=*/0, sk_ref_sp(this),
                                    offset, size);
}
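// The same two update paths in raw Vulkan (record_buffer_update is illustrative; `staging`
// is assumed to already contain the data, written through a mapped pointer as above):
#include <vulkan/vulkan.h>
static void record_buffer_update(VkCommandBuffer cmd, VkBuffer dst, VkDeviceSize offset,
                                 VkDeviceSize size, const void* src, VkBuffer staging) {
    if (size <= 65536 && size % 4 == 0 && offset % 4 == 0) {
        // Inline path: the bytes travel inside the command buffer itself.
        vkCmdUpdateBuffer(cmd, dst, offset, size, src);
    } else {
        // Staging path: a GPU-side copy from the transfer buffer.
        VkBufferCopy region = {/*srcOffset=*/0, /*dstOffset=*/offset, size};
        vkCmdCopyBuffer(cmd, staging, dst, 1, &region);
    }
}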
void GrVkBuffer::addMemoryBarrier(VkAccessFlags srcAccessMask, VkAccessFlags dstAccessMask,
                                  VkPipelineStageFlags srcStageMask,
                                  VkPipelineStageFlags dstStageMask, bool byRegion) const {
    VkBufferMemoryBarrier bufferMemoryBarrier = {VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, nullptr,
                                                 srcAccessMask, dstAccessMask,
                                                 VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED,
                                                 fBuffer, /*offset=*/0, this->size()};
    this->getVkGpu()->addBufferMemoryBarrier(this, srcStageMask, dstStageMask, byRegion,
                                             &bufferMemoryBarrier);
}
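// A concrete instance of the barrier this method records, in raw Vulkan: guarding a
// transfer write so it completes before vertex input reads the buffer (illustrative helper):
#include <vulkan/vulkan.h>
static void barrier_transfer_write_to_vertex_read(VkCommandBuffer cmd, VkBuffer buffer,
                                                  VkDeviceSize size) {
    VkBufferMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;  // no queue-ownership transfer
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.buffer = buffer;
    barrier.offset = 0;
    barrier.size = size;
    vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                         /*dependencyFlags=*/0, 0, nullptr, 1, &barrier, 0, nullptr);
}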
void GrVkBuffer::vkRelease() {
    if (this->wasDestroyed()) {
        return;
    }

    if (fMapPtr) {
        this->vkUnmap(0, this->size());
        fMapPtr = nullptr;
    }

    if (fUniformDescriptorSet) {
        fUniformDescriptorSet->recycle();
        fUniformDescriptorSet = nullptr;
    }

    VK_CALL(this->getVkGpu(), DestroyBuffer(this->getVkGpu()->device(), fBuffer, nullptr));
    fBuffer = VK_NULL_HANDLE;

    skgpu::VulkanMemory::FreeBufferMemory(this->getVkGpu()->memoryAllocator(), fAlloc);
    fAlloc.fBackendMemory = 0;
}
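// Teardown order in raw Vulkan terms (destroy_plain_buffer is illustrative): unmap before
// freeing, destroy the handle, then release the memory; the buffer must be idle on the GPU.
#include <vulkan/vulkan.h>
static void destroy_plain_buffer(VkDevice device, VkBuffer buffer, VkDeviceMemory memory,
                                 bool isMapped) {
    if (isMapped) {
        vkUnmapMemory(device, memory);
    }
    vkDestroyBuffer(device, buffer, nullptr);
    vkFreeMemory(device, memory, nullptr);
}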
bool GrVkBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool /*preserve*/) {
    if (this->isVkMappable()) {
        // Map with an empty read range (nothing is read back), copy the new data, then
        // flush only the updated range in vkUnmap().
        this->vkMap(0, 0);
        if (!fMapPtr) {
            return false;
        }
        memcpy(SkTAddOffset<void>(fMapPtr, offset), src, size);
        this->vkUnmap(offset, size);
    } else {
        this->copyCpuDataToGpuBuffer(src, offset, size);
    }
    return true;
}
GrVkGpu* GrVkBuffer::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}