VulkanAMDMemoryAllocator.cpp
/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vulkan/VulkanAMDMemoryAllocator.h"

#include "include/gpu/vk/VulkanExtensions.h"
#include "include/gpu/vk/VulkanMemoryAllocator.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/vulkan/VulkanInterface.h"
#include "src/gpu/vulkan/VulkanUtilsPriv.h"

#include <algorithm>
#include <cstring>

namespace skgpu {

sk_sp<VulkanMemoryAllocator> VulkanAMDMemoryAllocator::Make(VkInstance instance,
                                                            VkPhysicalDevice physicalDevice,
                                                            VkDevice device,
                                                            uint32_t physicalDeviceVersion,
                                                            const VulkanExtensions* extensions,
                                                            const VulkanInterface* interface,
                                                            ThreadSafe threadSafe) {
// Copy the function pointers Skia has already loaded into the table that VMA expects.
#define SKGPU_COPY_FUNCTION(NAME) functions.vk##NAME = interface->fFunctions.f##NAME
#define SKGPU_COPY_FUNCTION_KHR(NAME) functions.vk##NAME##KHR = interface->fFunctions.f##NAME

    VmaVulkanFunctions functions;
    // We should be setting all the required functions (at least through vulkan 1.1), but this is
    // just extra belt and suspenders to make sure there aren't uninitialized values here.
    std::memset(&functions, 0, sizeof(VmaVulkanFunctions));

    // We don't use dynamic function loading in the allocator, so we set the getProc functions to
    // null.
    functions.vkGetInstanceProcAddr = nullptr;
    functions.vkGetDeviceProcAddr = nullptr;
    SKGPU_COPY_FUNCTION(GetPhysicalDeviceProperties);
    SKGPU_COPY_FUNCTION(GetPhysicalDeviceMemoryProperties);
    SKGPU_COPY_FUNCTION(AllocateMemory);
    SKGPU_COPY_FUNCTION(FreeMemory);
    SKGPU_COPY_FUNCTION(MapMemory);
    SKGPU_COPY_FUNCTION(UnmapMemory);
    SKGPU_COPY_FUNCTION(FlushMappedMemoryRanges);
    SKGPU_COPY_FUNCTION(InvalidateMappedMemoryRanges);
    SKGPU_COPY_FUNCTION(BindBufferMemory);
    SKGPU_COPY_FUNCTION(BindImageMemory);
    SKGPU_COPY_FUNCTION(GetBufferMemoryRequirements);
    SKGPU_COPY_FUNCTION(GetImageMemoryRequirements);
    SKGPU_COPY_FUNCTION(CreateBuffer);
    SKGPU_COPY_FUNCTION(DestroyBuffer);
    SKGPU_COPY_FUNCTION(CreateImage);
    SKGPU_COPY_FUNCTION(DestroyImage);
    SKGPU_COPY_FUNCTION(CmdCopyBuffer);
    SKGPU_COPY_FUNCTION_KHR(GetBufferMemoryRequirements2);
    SKGPU_COPY_FUNCTION_KHR(GetImageMemoryRequirements2);
    SKGPU_COPY_FUNCTION_KHR(BindBufferMemory2);
    SKGPU_COPY_FUNCTION_KHR(BindImageMemory2);
    SKGPU_COPY_FUNCTION_KHR(GetPhysicalDeviceMemoryProperties2);

    VmaAllocatorCreateInfo info;
    info.flags = 0;
    if (threadSafe == ThreadSafe::kNo) {
        info.flags |= VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT;
    }
    if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        (extensions->hasExtension(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME, 3) &&
         extensions->hasExtension(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, 1))) {
        info.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
    }

    info.physicalDevice = physicalDevice;
    info.device = device;
    // 4MB was picked for the size here by looking at memory usage of Android apps and runs of DM.
    // It seems to be a good compromise between not wasting unused allocated space and not making
    // too many small allocations. The AMD allocator will start making blocks at 1/8 the max size
    // and build up block size as needed before capping at the max set here.
    info.preferredLargeHeapBlockSize = 4*1024*1024;
    info.pAllocationCallbacks = nullptr;
    info.pDeviceMemoryCallbacks = nullptr;
    info.pHeapSizeLimit = nullptr;
    info.pVulkanFunctions = &functions;
    info.instance = instance;
    // TODO: Update our interface and headers to support vulkan 1.3 and add in the new required
    // functions for 1.3 that the allocator needs. Until then we just clamp the version to 1.1.
    info.vulkanApiVersion = std::min(physicalDeviceVersion, VK_MAKE_VERSION(1, 1, 0));
    info.pTypeExternalMemoryHandleTypes = nullptr;

    VmaAllocator allocator;
    vmaCreateAllocator(&info, &allocator);

    return sk_sp<VulkanAMDMemoryAllocator>(new VulkanAMDMemoryAllocator(allocator));
}

VulkanAMDMemoryAllocator::VulkanAMDMemoryAllocator(VmaAllocator allocator)
        : fAllocator(allocator) {}

VulkanAMDMemoryAllocator::~VulkanAMDMemoryAllocator() {
    vmaDestroyAllocator(fAllocator);
    fAllocator = VK_NULL_HANDLE;
}

VkResult VulkanAMDMemoryAllocator::allocateImageMemory(VkImage image,
                                                       uint32_t allocationPropertyFlags,
                                                       skgpu::VulkanBackendMemory* backendMemory) {
    TRACE_EVENT0_ALWAYS("skia.gpu", TRACE_FUNC);
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    info.preferredFlags = 0;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    if (kDedicatedAllocation_AllocationPropertyFlag & allocationPropertyFlags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }
    if (kLazyAllocation_AllocationPropertyFlag & allocationPropertyFlags) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }
    if (kProtected_AllocationPropertyFlag & allocationPropertyFlags) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForImage(fAllocator, image, &info, &allocation, nullptr);
    if (VK_SUCCESS == result) {
        *backendMemory = (VulkanBackendMemory)allocation;
    }
    return result;
}

VkResult VulkanAMDMemoryAllocator::allocateBufferMemory(VkBuffer buffer,
                                                        BufferUsage usage,
                                                        uint32_t allocationPropertyFlags,
                                                        skgpu::VulkanBackendMemory* backendMemory) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    switch (usage) {
        case BufferUsage::kGpuOnly:
            info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            info.preferredFlags = 0;
            break;
        case BufferUsage::kCpuWritesGpuReads:
            // When doing cpu writes and gpu reads the general rule of thumb is to use coherent
            // memory. Though this depends on the fact that we are not doing any cpu reads and the
            // cpu writes are sequential. For sparse writes we'd want cpu cached memory; however,
            // we don't do these types of writes in Skia.
            //
            // TODO: In the future there may be times where specific types of memory could benefit
            // from memory that is both coherent and cached. Typically these allow the gpu to read
            // cpu writes from the cache without needing to flush the writes throughout the cache.
            // The reverse is not true and GPU writes tend to invalidate the cache regardless. Also,
            // these gpu cache read accesses are typically lower bandwidth than non-cached memory.
            // For now Skia doesn't really have a need for this type of memory. But if we ever do,
            // we could pass in an AllocationPropertyFlag that requests the cached property.
            info.requiredFlags =
                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            break;
        case BufferUsage::kTransfersFromCpuToGpu:
            info.requiredFlags =
                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
            info.preferredFlags = 0;
            break;
        case BufferUsage::kTransfersFromGpuToCpu:
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
            break;
    }

    if (kDedicatedAllocation_AllocationPropertyFlag & allocationPropertyFlags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }
    if ((kLazyAllocation_AllocationPropertyFlag & allocationPropertyFlags) &&
        BufferUsage::kGpuOnly == usage) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }

    if (kPersistentlyMapped_AllocationPropertyFlag & allocationPropertyFlags) {
        SkASSERT(BufferUsage::kGpuOnly != usage);
        info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
    if (VK_SUCCESS == result) {
        *backendMemory = (VulkanBackendMemory)allocation;
    }

    return result;
}

void VulkanAMDMemoryAllocator::freeMemory(const VulkanBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (VmaAllocation)memoryHandle;
    vmaFreeMemory(fAllocator, allocation);
}

void VulkanAMDMemoryAllocator::getAllocInfo(const VulkanBackendMemory& memoryHandle,
                                            VulkanAlloc* alloc) const {
    const VmaAllocation allocation = (VmaAllocation)memoryHandle;
    VmaAllocationInfo vmaInfo;
    vmaGetAllocationInfo(fAllocator, allocation, &vmaInfo);

    VkMemoryPropertyFlags memFlags;
    vmaGetMemoryTypeProperties(fAllocator, vmaInfo.memoryType, &memFlags);

    // Map the VMA memory type's property flags onto VulkanAlloc flags.
    uint32_t flags = 0;
    if (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT & memFlags) {
        flags |= VulkanAlloc::kMappable_Flag;
    }
    if (!SkToBool(VK_MEMORY_PROPERTY_HOST_COHERENT_BIT & memFlags)) {
        flags |= VulkanAlloc::kNoncoherent_Flag;
    }
    if (VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT & memFlags) {
        flags |= VulkanAlloc::kLazilyAllocated_Flag;
    }

    alloc->fMemory = vmaInfo.deviceMemory;
    alloc->fOffset = vmaInfo.offset;
    alloc->fSize = vmaInfo.size;
    alloc->fFlags = flags;
    alloc->fBackendMemory = memoryHandle;
}

VkResult VulkanAMDMemoryAllocator::mapMemory(const VulkanBackendMemory& memoryHandle,
                                             void** data) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (VmaAllocation)memoryHandle;
    return vmaMapMemory(fAllocator, allocation, data);
}

void VulkanAMDMemoryAllocator::unmapMemory(const VulkanBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (VmaAllocation)memoryHandle;
    vmaUnmapMemory(fAllocator, allocation);
}

VkResult VulkanAMDMemoryAllocator::flushMemory(const VulkanBackendMemory& memoryHandle,
                                               VkDeviceSize offset, VkDeviceSize size) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (VmaAllocation)memoryHandle;
    return vmaFlushAllocation(fAllocator, allocation, offset, size);
}

VkResult VulkanAMDMemoryAllocator::invalidateMemory(const VulkanBackendMemory& memoryHandle,
                                                    VkDeviceSize offset, VkDeviceSize size) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (VmaAllocation)memoryHandle;
    return vmaInvalidateAllocation(fAllocator, allocation, offset, size);
}

// blockBytes counts the device memory VMA has reserved in VkDeviceMemory blocks; allocationBytes
// counts only the bytes currently occupied by live allocations within those blocks.
std::pair<uint64_t, uint64_t> VulkanAMDMemoryAllocator::totalAllocatedAndUsedMemory() const {
    VmaTotalStatistics stats;
    vmaCalculateStatistics(fAllocator, &stats);
    return {stats.total.statistics.blockBytes, stats.total.statistics.allocationBytes};
}

} // namespace skgpu
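
For readers coming to this file from the VulkanMemoryAllocator interface, the sketch below shows roughly how a client could drive the allocator end to end: build it with Make(), allocate and map buffer memory, flush a non-coherent write, and release the allocation. It is a minimal illustration, not Skia's own usage: the helper name uploadFourBytes is hypothetical, all Vulkan handles and the VulkanExtensions/VulkanInterface wrappers are assumed to be created elsewhere, and the spellings skgpu::ThreadSafe::kYes, BufferUsage::kCpuWritesGpuReads, and VulkanAlloc::kNoncoherent_Flag are taken from the Skia headers as recalled, not from this page.

// Hypothetical usage sketch; not part of VulkanAMDMemoryAllocator.cpp.
// Assumes the Skia Vulkan headers included above plus already-created Vulkan objects.
#include <cstring>

static void uploadFourBytes(VkInstance instance,
                            VkPhysicalDevice physicalDevice,
                            VkDevice device,
                            uint32_t physicalDeviceVersion,
                            const skgpu::VulkanExtensions* extensions,
                            const skgpu::VulkanInterface* interface,
                            VkBuffer buffer) {
    using Allocator = skgpu::VulkanMemoryAllocator;

    // ThreadSafe::kYes leaves VMA's internal synchronization enabled so the allocator can be
    // shared across threads; kNo would set VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT.
    sk_sp<Allocator> allocator = skgpu::VulkanAMDMemoryAllocator::Make(
            instance, physicalDevice, device, physicalDeviceVersion,
            extensions, interface, skgpu::ThreadSafe::kYes);
    if (!allocator) {
        return;
    }

    // Request host-visible memory for a buffer the CPU writes and the GPU reads.
    skgpu::VulkanBackendMemory memory = 0;
    VkResult result = allocator->allocateBufferMemory(
            buffer,
            Allocator::BufferUsage::kCpuWritesGpuReads,
            /*allocationPropertyFlags=*/0,
            &memory);
    if (result != VK_SUCCESS) {
        return;
    }

    // getAllocInfo reports the VkDeviceMemory/offset/size triple plus flags; a real caller would
    // bind alloc.fMemory at alloc.fOffset to the buffer with vkBindBufferMemory before using it.
    skgpu::VulkanAlloc alloc;
    allocator->getAllocInfo(memory, &alloc);

    void* mapped = nullptr;
    if (allocator->mapMemory(memory, &mapped) == VK_SUCCESS) {
        std::memset(mapped, 0, 4);
        // Non-coherent memory needs an explicit flush before the GPU can see the CPU write.
        if (alloc.fFlags & skgpu::VulkanAlloc::kNoncoherent_Flag) {
            allocator->flushMemory(memory, /*offset=*/0, /*size=*/4);
        }
        allocator->unmapMemory(memory);
    }

    // Return the allocation to VMA once the buffer is done with it.
    allocator->freeMemory(memory);
}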