GrVkAMDMemoryAllocator.cpp

/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"

#include "src/core/SkTraceEvent.h"
#include "src/gpu/vk/GrVkInterface.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkUtil.h"

GrVkAMDMemoryAllocator::GrVkAMDMemoryAllocator(VkPhysicalDevice physicalDevice,
                                               VkDevice device,
                                               sk_sp<const GrVkInterface> interface)
        : fAllocator(VK_NULL_HANDLE)
        , fInterface(std::move(interface))
        , fDevice(device) {
#define GR_COPY_FUNCTION(NAME) functions.vk##NAME = fInterface->fFunctions.f##NAME
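    // For example, GR_COPY_FUNCTION(MapMemory) expands to
    // functions.vkMapMemory = fInterface->fFunctions.fMapMemory.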

    VmaVulkanFunctions functions;
    GR_COPY_FUNCTION(GetPhysicalDeviceProperties);
    GR_COPY_FUNCTION(GetPhysicalDeviceMemoryProperties);
    GR_COPY_FUNCTION(AllocateMemory);
    GR_COPY_FUNCTION(FreeMemory);
    GR_COPY_FUNCTION(MapMemory);
    GR_COPY_FUNCTION(UnmapMemory);
    GR_COPY_FUNCTION(BindBufferMemory);
    GR_COPY_FUNCTION(BindImageMemory);
    GR_COPY_FUNCTION(GetBufferMemoryRequirements);
    GR_COPY_FUNCTION(GetImageMemoryRequirements);
    GR_COPY_FUNCTION(CreateBuffer);
    GR_COPY_FUNCTION(DestroyBuffer);
    GR_COPY_FUNCTION(CreateImage);
    GR_COPY_FUNCTION(DestroyImage);

    // Skia currently doesn't support VK_KHR_dedicated_allocation.
    functions.vkGetBufferMemoryRequirements2KHR = nullptr;
    functions.vkGetImageMemoryRequirements2KHR = nullptr;

    VmaAllocatorCreateInfo info;
    info.flags = 0;
    info.physicalDevice = physicalDevice;
    info.device = device;
    // 4MB was picked for the size here by looking at memory usage of Android apps and runs of DM.
    // It seems to be a good compromise between wasting unused allocated space and making too many
    // small allocations. The AMD allocator will start making blocks at 1/8 the max size and build
    // up the block size as needed before capping at the max set here.
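    // For example, with the 4MB cap below the first blocks are 512KB (1/8 of the max), and block
    // sizes grow from there as allocations accumulate.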
    info.preferredLargeHeapBlockSize = 4*1024*1024;
    info.pAllocationCallbacks = nullptr;
    info.pDeviceMemoryCallbacks = nullptr;
    info.frameInUseCount = 0;
    info.pHeapSizeLimit = nullptr;
    info.pVulkanFunctions = &functions;

    vmaCreateAllocator(&info, &fAllocator);
}

GrVkAMDMemoryAllocator::~GrVkAMDMemoryAllocator() {
    vmaDestroyAllocator(fAllocator);
    fAllocator = VK_NULL_HANDLE;
}

bool GrVkAMDMemoryAllocator::allocateMemoryForImage(VkImage image, AllocationPropertyFlags flags,
                                                    GrVkBackendMemory* backendMemory) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    info.preferredFlags = 0;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }
    if (AllocationPropertyFlags::kLazyAllocation & flags) {
        info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }
    if (AllocationPropertyFlags::kProtected & flags) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForImage(fAllocator, image, &info, &allocation, nullptr);
    if (VK_SUCCESS != result) {
        return false;
    }
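    // GrVkBackendMemory is an opaque handle; we store the VmaAllocation in it and cast it back
    // in freeMemory() and getAllocInfo() below.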
    *backendMemory = (GrVkBackendMemory)allocation;
    return true;
}

bool GrVkAMDMemoryAllocator::allocateMemoryForBuffer(VkBuffer buffer, BufferUsage usage,
                                                     AllocationPropertyFlags flags,
                                                     GrVkBackendMemory* backendMemory) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;
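
    // Map the buffer's intended access pattern to Vulkan memory property flags. requiredFlags
    // must all be present in the memory type VMA picks; preferredFlags only bias its choice.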
    switch (usage) {
        case BufferUsage::kGpuOnly:
            info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            info.preferredFlags = 0;
            break;
        case BufferUsage::kCpuOnly:
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
            break;
        case BufferUsage::kCpuWritesGpuReads:
            // On the first attempt, require memory that is also host-cached.
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                 VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            break;
        case BufferUsage::kGpuWritesCpuReads:
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                 VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
            break;
    }

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }
    if ((AllocationPropertyFlags::kLazyAllocation & flags) && BufferUsage::kGpuOnly == usage) {
        info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }
    if (AllocationPropertyFlags::kPersistentlyMapped & flags) {
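        // Persistent mapping needs host-visible memory, so this is never requested for
        // GPU-only buffers.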
        SkASSERT(BufferUsage::kGpuOnly != usage);
        info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
    if (VK_SUCCESS != result) {
        if (usage == BufferUsage::kCpuWritesGpuReads) {
            // Try again, but this time drop the requirement that the memory be host-cached.
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
            result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
        }
    }
    if (VK_SUCCESS != result) {
        return false;
    }

    *backendMemory = (GrVkBackendMemory)allocation;
    return true;
}

void GrVkAMDMemoryAllocator::freeMemory(const GrVkBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    vmaFreeMemory(fAllocator, allocation);
}

void GrVkAMDMemoryAllocator::getAllocInfo(const GrVkBackendMemory& memoryHandle,
                                          GrVkAlloc* alloc) const {
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    VmaAllocationInfo vmaInfo;
    vmaGetAllocationInfo(fAllocator, allocation, &vmaInfo);

    VkMemoryPropertyFlags memFlags;
    vmaGetMemoryTypeProperties(fAllocator, vmaInfo.memoryType, &memFlags);

    uint32_t flags = 0;
    if (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT & memFlags) {
        flags |= GrVkAlloc::kMappable_Flag;
    }
    if (!SkToBool(VK_MEMORY_PROPERTY_HOST_COHERENT_BIT & memFlags)) {
        flags |= GrVkAlloc::kNoncoherent_Flag;
    }

    alloc->fMemory = vmaInfo.deviceMemory;
    alloc->fOffset = vmaInfo.offset;
    alloc->fSize = vmaInfo.size;
    alloc->fFlags = flags;
    alloc->fBackendMemory = memoryHandle;

    // TODO: Remove this hack once the AMD allocator is able to handle the alignment of noncoherent
    // memory itself.
    if (!SkToBool(VK_MEMORY_PROPERTY_HOST_COHERENT_BIT & memFlags)) {
        // This is a hack to say that the allocation size is actually larger than it is. This is to
        // make sure when we are flushing and invalidating noncoherent memory we have a size that is
        // aligned to the nonCoherentAtomSize. This is safe for three reasons. First, the total size
        // of the VkDeviceMemory we allocate will always be a multiple of the max possible alignment
        // (currently 256). Second, all suballocations are aligned with an offset of 256. And
        // finally, the allocator we are using always maps the entire VkDeviceMemory, so the range
        // we'll be flushing/invalidating will be mapped. So our new fake allocation size will
        // always fit into the VkDeviceMemory, will never push it into another suballocation, and
        // will always be mapped when map is called.
        const VkPhysicalDeviceProperties* devProps;
        vmaGetPhysicalDeviceProperties(fAllocator, &devProps);
        VkDeviceSize alignment = devProps->limits.nonCoherentAtomSize;
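        // Round fSize up to the next multiple of nonCoherentAtomSize. For example, with an atom
        // size of 256, a 1000-byte allocation is reported as 1024 bytes.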
        alloc->fSize = (alloc->fSize + alignment - 1) & ~(alignment - 1);
    }
}

void* GrVkAMDMemoryAllocator::mapMemory(const GrVkBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    void* mapPtr;
    vmaMapMemory(fAllocator, allocation, &mapPtr);
    return mapPtr;
}

void GrVkAMDMemoryAllocator::unmapMemory(const GrVkBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    vmaUnmapMemory(fAllocator, allocation);
}

void GrVkAMDMemoryAllocator::flushMappedMemory(const GrVkBackendMemory& memoryHandle,
                                               VkDeviceSize offset, VkDeviceSize size) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    GrVkAlloc info;
    this->getAllocInfo(memoryHandle, &info);
    if (GrVkAlloc::kNoncoherent_Flag & info.fFlags) {
        // Ranges flushed on non-coherent memory must be aligned to the nonCoherentAtomSize.
        const VkPhysicalDeviceProperties* physDevProps;
        vmaGetPhysicalDeviceProperties(fAllocator, &physDevProps);
        VkDeviceSize alignment = physDevProps->limits.nonCoherentAtomSize;

        VkMappedMemoryRange mappedMemoryRange;
        GrVkMemory::GetNonCoherentMappedMemoryRange(info, offset, size, alignment,
                                                    &mappedMemoryRange);
        GR_VK_CALL(fInterface, FlushMappedMemoryRanges(fDevice, 1, &mappedMemoryRange));
    }
}

void GrVkAMDMemoryAllocator::invalidateMappedMemory(const GrVkBackendMemory& memoryHandle,
                                                    VkDeviceSize offset, VkDeviceSize size) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    GrVkAlloc info;
    this->getAllocInfo(memoryHandle, &info);
    if (GrVkAlloc::kNoncoherent_Flag & info.fFlags) {
        // Ranges invalidated on non-coherent memory must be aligned to the nonCoherentAtomSize.
        const VkPhysicalDeviceProperties* physDevProps;
        vmaGetPhysicalDeviceProperties(fAllocator, &physDevProps);
        VkDeviceSize alignment = physDevProps->limits.nonCoherentAtomSize;

        VkMappedMemoryRange mappedMemoryRange;
        GrVkMemory::GetNonCoherentMappedMemoryRange(info, offset, size, alignment,
                                                    &mappedMemoryRange);
        GR_VK_CALL(fInterface, InvalidateMappedMemoryRanges(fDevice, 1, &mappedMemoryRange));
    }
}
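
// "Used" counts only the bytes occupied by live suballocations.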
uint64_t GrVkAMDMemoryAllocator::totalUsedMemory() const {
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    return stats.total.usedBytes;
}
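
// "Allocated" also counts the unused space in VMA's memory blocks, i.e. all VkDeviceMemory
// reserved from the driver, whether or not it currently backs a suballocation.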
uint64_t GrVkAMDMemoryAllocator::totalAllocatedMemory() const {
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    return stats.total.usedBytes + stats.total.unusedBytes;
}