Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
VulkanAMDMemoryAllocator.cpp
Go to the documentation of this file.
1/*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
9
13
14namespace skgpu {
15
16#ifndef SK_USE_VMA
18 VkInstance instance,
19 VkPhysicalDevice physicalDevice,
20 VkDevice device,
21 uint32_t physicalDeviceVersion,
22 const VulkanExtensions* extensions,
23 const VulkanInterface* interface,
24 bool threadSafe) {
25 return nullptr;
26}
27#else
28
30 VkInstance instance,
31 VkPhysicalDevice physicalDevice,
32 VkDevice device,
33 uint32_t physicalDeviceVersion,
34 const VulkanExtensions* extensions,
35 const VulkanInterface* interface,
36 bool threadSafe) {
37#define SKGPU_COPY_FUNCTION(NAME) functions.vk##NAME = interface->fFunctions.f##NAME
38#define SKGPU_COPY_FUNCTION_KHR(NAME) functions.vk##NAME##KHR = interface->fFunctions.f##NAME
39
40 VmaVulkanFunctions functions;
41 // We should be setting all the required functions (at least through vulkan 1.1), but this is
42 // just extra belt and suspenders to make sure there isn't unitialized values here.
43 memset(&functions, 0, sizeof(VmaVulkanFunctions));
44
45 // We don't use dynamic function getting in the allocator so we set the getProc functions to
46 // null.
47 functions.vkGetInstanceProcAddr = nullptr;
48 functions.vkGetDeviceProcAddr = nullptr;
49 SKGPU_COPY_FUNCTION(GetPhysicalDeviceProperties);
50 SKGPU_COPY_FUNCTION(GetPhysicalDeviceMemoryProperties);
51 SKGPU_COPY_FUNCTION(AllocateMemory);
52 SKGPU_COPY_FUNCTION(FreeMemory);
53 SKGPU_COPY_FUNCTION(MapMemory);
54 SKGPU_COPY_FUNCTION(UnmapMemory);
55 SKGPU_COPY_FUNCTION(FlushMappedMemoryRanges);
56 SKGPU_COPY_FUNCTION(InvalidateMappedMemoryRanges);
57 SKGPU_COPY_FUNCTION(BindBufferMemory);
58 SKGPU_COPY_FUNCTION(BindImageMemory);
59 SKGPU_COPY_FUNCTION(GetBufferMemoryRequirements);
60 SKGPU_COPY_FUNCTION(GetImageMemoryRequirements);
61 SKGPU_COPY_FUNCTION(CreateBuffer);
62 SKGPU_COPY_FUNCTION(DestroyBuffer);
63 SKGPU_COPY_FUNCTION(CreateImage);
64 SKGPU_COPY_FUNCTION(DestroyImage);
65 SKGPU_COPY_FUNCTION(CmdCopyBuffer);
66 SKGPU_COPY_FUNCTION_KHR(GetBufferMemoryRequirements2);
67 SKGPU_COPY_FUNCTION_KHR(GetImageMemoryRequirements2);
68 SKGPU_COPY_FUNCTION_KHR(BindBufferMemory2);
69 SKGPU_COPY_FUNCTION_KHR(BindImageMemory2);
70 SKGPU_COPY_FUNCTION_KHR(GetPhysicalDeviceMemoryProperties2);
71
72 VmaAllocatorCreateInfo info;
73 info.flags = 0;
74 if (!threadSafe) {
75 info.flags |= VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT;
76 }
77 if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
78 (extensions->hasExtension(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME, 1) &&
79 extensions->hasExtension(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, 1))) {
80 info.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
81 }
82
83 info.physicalDevice = physicalDevice;
84 info.device = device;
85 // The old value was found to result in roughly 20% wasted space in Chromium. Reducing to only
86 // 64KB cut down the wasted space to about 1%, with no perf regressions and some perf
87 // improvements. The AMD allocator will start making blocks at 1/8 the max size set here and
88 // builds up as needed until capping at this max size.
89 info.preferredLargeHeapBlockSize = 64*1024;
90 info.pAllocationCallbacks = nullptr;
91 info.pDeviceMemoryCallbacks = nullptr;
92 info.pHeapSizeLimit = nullptr;
93 info.pVulkanFunctions = &functions;
94 info.instance = instance;
95 // TODO: Update our interface and headers to support vulkan 1.3 and add in the new required
96 // functions for 1.3 that the allocator needs. Until then we just clamp the version to 1.1.
97 info.vulkanApiVersion = std::min(physicalDeviceVersion, VK_MAKE_VERSION(1, 1, 0));
98 info.pTypeExternalMemoryHandleTypes = nullptr;
99
100 VmaAllocator allocator;
101 vmaCreateAllocator(&info, &allocator);
102
103 return sk_sp<VulkanAMDMemoryAllocator>(new VulkanAMDMemoryAllocator(allocator));
104}
105
// Takes ownership of the given VmaAllocator; it is destroyed in the destructor.
VulkanAMDMemoryAllocator::VulkanAMDMemoryAllocator(VmaAllocator allocator)
        : fAllocator(allocator) {}
108
VulkanAMDMemoryAllocator::~VulkanAMDMemoryAllocator() {
    // Tear down the owned VMA allocator and clear the handle so a stale
    // pointer is never observed after destruction.
    vmaDestroyAllocator(fAllocator);
    fAllocator = VK_NULL_HANDLE;
}
113
114VkResult VulkanAMDMemoryAllocator::allocateImageMemory(VkImage image,
115 uint32_t allocationPropertyFlags,
116 skgpu::VulkanBackendMemory* backendMemory) {
117 TRACE_EVENT0_ALWAYS("skia.gpu", TRACE_FUNC);
118 VmaAllocationCreateInfo info;
119 info.flags = 0;
120 info.usage = VMA_MEMORY_USAGE_UNKNOWN;
122 info.preferredFlags = 0;
123 info.memoryTypeBits = 0;
124 info.pool = VK_NULL_HANDLE;
125 info.pUserData = nullptr;
126
127 if (kDedicatedAllocation_AllocationPropertyFlag & allocationPropertyFlags) {
128 info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
129 }
130 if (kLazyAllocation_AllocationPropertyFlag & allocationPropertyFlags) {
132 }
133 if (kProtected_AllocationPropertyFlag & allocationPropertyFlags) {
134 info.requiredFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
135 }
136
137 VmaAllocation allocation;
138 VkResult result = vmaAllocateMemoryForImage(fAllocator, image, &info, &allocation, nullptr);
139 if (VK_SUCCESS == result) {
140 *backendMemory = (VulkanBackendMemory)allocation;
141 }
142 return result;
143}
144
145VkResult VulkanAMDMemoryAllocator::allocateBufferMemory(VkBuffer buffer,
147 uint32_t allocationPropertyFlags,
148 skgpu::VulkanBackendMemory* backendMemory) {
149 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
150 VmaAllocationCreateInfo info;
151 info.flags = 0;
152 info.usage = VMA_MEMORY_USAGE_UNKNOWN;
153 info.memoryTypeBits = 0;
154 info.pool = VK_NULL_HANDLE;
155 info.pUserData = nullptr;
156
157 switch (usage) {
158 case BufferUsage::kGpuOnly:
160 info.preferredFlags = 0;
161 break;
162 case BufferUsage::kCpuWritesGpuReads:
163 // When doing cpu writes and gpu reads the general rule of thumb is to use coherent
164 // memory. Though this depends on the fact that we are not doing any cpu reads and the
165 // cpu writes are sequential. For sparse writes we'd want cpu cached memory, however we
166 // don't do these types of writes in Skia.
167 //
168 // TODO: In the future there may be times where specific types of memory could benefit
169 // from a coherent and cached memory. Typically these allow for the gpu to read cpu
170 // writes from the cache without needing to flush the writes throughout the cache. The
171 // reverse is not true and GPU writes tend to invalidate the cache regardless. Also
172 // these gpu cache read access are typically lower bandwidth than non-cached memory.
173 // For now Skia doesn't really have a need or want of this type of memory. But if we
174 // ever do we could pass in an AllocationPropertyFlag that requests the cached property.
175 info.requiredFlags =
178 break;
179 case BufferUsage::kTransfersFromCpuToGpu:
180 info.requiredFlags =
182 info.preferredFlags = 0;
183 break;
184 case BufferUsage::kTransfersFromGpuToCpu:
187 break;
188 }
189
190 if (kDedicatedAllocation_AllocationPropertyFlag & allocationPropertyFlags) {
191 info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
192 }
193 if ((kLazyAllocation_AllocationPropertyFlag & allocationPropertyFlags) &&
194 BufferUsage::kGpuOnly == usage) {
196 }
197
198 if (kPersistentlyMapped_AllocationPropertyFlag & allocationPropertyFlags) {
199 SkASSERT(BufferUsage::kGpuOnly != usage);
200 info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
201 }
202
203 VmaAllocation allocation;
204 VkResult result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
205 if (VK_SUCCESS == result) {
206 *backendMemory = (VulkanBackendMemory)allocation;
207 }
208
209 return result;
210}
211
212void VulkanAMDMemoryAllocator::freeMemory(const VulkanBackendMemory& memoryHandle) {
213 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
214 const VmaAllocation allocation = (VmaAllocation)memoryHandle;
215 vmaFreeMemory(fAllocator, allocation);
216}
217
218void VulkanAMDMemoryAllocator::getAllocInfo(const VulkanBackendMemory& memoryHandle,
219 VulkanAlloc* alloc) const {
220 const VmaAllocation allocation = (VmaAllocation)memoryHandle;
221 VmaAllocationInfo vmaInfo;
222 vmaGetAllocationInfo(fAllocator, allocation, &vmaInfo);
223
224 VkMemoryPropertyFlags memFlags;
225 vmaGetMemoryTypeProperties(fAllocator, vmaInfo.memoryType, &memFlags);
226
227 uint32_t flags = 0;
230 }
233 }
236 }
237
238 alloc->fMemory = vmaInfo.deviceMemory;
239 alloc->fOffset = vmaInfo.offset;
240 alloc->fSize = vmaInfo.size;
241 alloc->fFlags = flags;
242 alloc->fBackendMemory = memoryHandle;
243}
244
245VkResult VulkanAMDMemoryAllocator::mapMemory(const VulkanBackendMemory& memoryHandle,
246 void** data) {
247 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
248 const VmaAllocation allocation = (VmaAllocation)memoryHandle;
249 return vmaMapMemory(fAllocator, allocation, data);
250}
251
252void VulkanAMDMemoryAllocator::unmapMemory(const VulkanBackendMemory& memoryHandle) {
253 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
254 const VmaAllocation allocation = (VmaAllocation)memoryHandle;
255 vmaUnmapMemory(fAllocator, allocation);
256}
257
258VkResult VulkanAMDMemoryAllocator::flushMemory(const VulkanBackendMemory& memoryHandle,
260 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
261 const VmaAllocation allocation = (VmaAllocation)memoryHandle;
262 return vmaFlushAllocation(fAllocator, allocation, offset, size);
263}
264
265VkResult VulkanAMDMemoryAllocator::invalidateMemory(const VulkanBackendMemory& memoryHandle,
267 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
268 const VmaAllocation allocation = (VmaAllocation)memoryHandle;
269 return vmaInvalidateAllocation(fAllocator, allocation, offset, size);
270}
271
272std::pair<uint64_t, uint64_t> VulkanAMDMemoryAllocator::totalAllocatedAndUsedMemory() const {
273 VmaTotalStatistics stats;
274 vmaCalculateStatistics(fAllocator, &stats);
275 return {stats.total.statistics.blockBytes, stats.total.statistics.allocationBytes};
276}
277
278#endif // SK_USE_VMA
279
280} // namespace skgpu
static void info(const char *fmt,...) SK_PRINTF_LIKE(1
Definition DM.cpp:213
#define SkASSERT(cond)
Definition SkAssert.h:116
static constexpr bool SkToBool(const T &x)
Definition SkTo.h:35
#define TRACE_EVENT0_ALWAYS(category_group, name)
#define TRACE_FUNC
#define SKGPU_COPY_FUNCTION_KHR(NAME)
#define SKGPU_COPY_FUNCTION(NAME)
static sk_sp< VulkanMemoryAllocator > Make(VkInstance instance, VkPhysicalDevice physicalDevice, VkDevice device, uint32_t physicalDeviceVersion, const VulkanExtensions *extensions, const VulkanInterface *interface, bool threadSafe)
VkDevice device
Definition main.cc:53
VkInstance instance
Definition main.cc:48
sk_sp< SkImage > image
Definition examples.cpp:29
FlutterSemanticsFlag flags
static const uint8_t buffer[]
GAsyncResult * result
dict stats
Definition malisc.py:20
VulkanMemoryAllocator::BufferUsage BufferUsage
intptr_t VulkanBackendMemory
Definition VulkanTypes.h:31
static void usage(char *argv0)
Point offset
#define TRACE_EVENT0(category_group, name)
VkFlags VkMemoryPropertyFlags
@ VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT
@ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
@ VK_MEMORY_PROPERTY_PROTECTED_BIT
@ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
@ VK_MEMORY_PROPERTY_HOST_CACHED_BIT
@ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
uint64_t VkDeviceSize
Definition vulkan_core.h:96
#define VK_MAKE_VERSION(major, minor, patch)
Definition vulkan_core.h:78
#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME
VkResult
@ VK_SUCCESS
#define VK_NULL_HANDLE
Definition vulkan_core.h:46
#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME