vulkan_device_queue.cc

// Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/vulkan/vulkan_device_queue.h"

#include <algorithm>
#include <cstring>
#include <iterator>
#include <memory>
#include <unordered_set>
#include <utility>
#include <vector>

#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
#include "gpu/config/gpu_info.h"  // nogncheck
#include "gpu/config/vulkan_info.h"
#include "gpu/vulkan/vulkan_command_pool.h"
#include "gpu/vulkan/vulkan_crash_keys.h"
#include "gpu/vulkan/vulkan_fence_helper.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "gpu/vulkan/vulkan_util.h"
#include "ui/gl/gl_angle_util_vulkan.h"

namespace gpu {

VulkanDeviceQueue::VulkanDeviceQueue(VkInstance vk_instance)
    : vk_instance_(vk_instance) {}

VulkanDeviceQueue::VulkanDeviceQueue(VulkanInstance* instance)
    : vk_instance_(instance->vk_instance()), instance_(instance) {}

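// The destructor requires that Destroy() has already released every Vulkan
// handle; a live device, queue, or physical device here indicates a teardown
// bug.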
VulkanDeviceQueue::~VulkanDeviceQueue() {
  DCHECK_EQ(static_cast<VkPhysicalDevice>(VK_NULL_HANDLE), vk_physical_device_);
  DCHECK_EQ(static_cast<VkDevice>(VK_NULL_HANDLE), vk_device_);
  DCHECK_EQ(static_cast<VkQueue>(VK_NULL_HANDLE), vk_queue_);
}

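// Initialize() performs the full device bring-up: score and select a physical
// device, validate the required and optional extensions, create the logical
// device with the requested features, fetch the queue, and set up the VMA
// allocator plus the fence helper.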
bool VulkanDeviceQueue::Initialize(
    uint32_t options,
    const GPUInfo* gpu_info,
    const std::vector<const char*>& required_extensions,
    const std::vector<const char*>& optional_extensions,
    bool allow_protected_memory,
    const GetPresentationSupportCallback& get_presentation_support,
    uint32_t heap_memory_limit) {
  DCHECK_EQ(static_cast<VkPhysicalDevice>(VK_NULL_HANDLE), vk_physical_device_);
  DCHECK_EQ(static_cast<VkDevice>(VK_NULL_HANDLE), owned_vk_device_);
  DCHECK_EQ(static_cast<VkDevice>(VK_NULL_HANDLE), vk_device_);
  DCHECK_EQ(static_cast<VkQueue>(VK_NULL_HANDLE), vk_queue_);

  if (VK_NULL_HANDLE == vk_instance_)
    return false;

  const VulkanInfo& info = instance_->vulkan_info();

  VkResult result = VK_SUCCESS;

  VkQueueFlags queue_flags = 0;
  if (options & DeviceQueueOption::GRAPHICS_QUEUE_FLAG)
    queue_flags |= VK_QUEUE_GRAPHICS_BIT;

  // We prefer a discrete GPU; an integrated GPU is second, then the others.
  static constexpr int kDeviceTypeScores[] = {
      0,  // VK_PHYSICAL_DEVICE_TYPE_OTHER
      3,  // VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU
      4,  // VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU
      2,  // VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU
      1,  // VK_PHYSICAL_DEVICE_TYPE_CPU
  };
  static_assert(VK_PHYSICAL_DEVICE_TYPE_OTHER == 0, "");
  static_assert(VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU == 1, "");
  static_assert(VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU == 2, "");
  static_assert(VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU == 3, "");
  static_assert(VK_PHYSICAL_DEVICE_TYPE_CPU == 4, "");

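  // Walk all enumerated physical devices and remember the best-scoring one
  // that exposes a queue family satisfying |queue_flags| (and presentation
  // support, when requested).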
  int device_index = -1;
  int queue_index = -1;
  int device_score = -1;
  for (size_t i = 0; i < info.physical_devices.size(); ++i) {
    const auto& device_info = info.physical_devices[i];
    const auto& device_properties = device_info.properties;
    if (device_properties.apiVersion < info.used_api_version)
      continue;

    // In dual-GPU cases, we cannot detect the active GPU correctly on Linux,
    // so don't select the GPU device based on |gpu_info|.
#if !BUILDFLAG(IS_LINUX)
    // If gpu_info is provided, the device should match it.
    if (gpu_info && (device_properties.vendorID != gpu_info->gpu.vendor_id ||
                     device_properties.deviceID != gpu_info->gpu.device_id)) {
      continue;
    }
#endif

    if (device_properties.deviceType < 0 ||
        device_properties.deviceType > VK_PHYSICAL_DEVICE_TYPE_CPU) {
      DLOG(ERROR) << "Unsupported device type: "
                  << device_properties.deviceType;
      continue;
    }

    const VkPhysicalDevice& device = device_info.device;
    bool found = false;
    for (size_t n = 0; n < device_info.queue_families.size(); ++n) {
      if ((device_info.queue_families[n].queueFlags & queue_flags) !=
          queue_flags) {
        continue;
      }

      if (options & DeviceQueueOption::PRESENTATION_SUPPORT_QUEUE_FLAG &&
          !get_presentation_support.Run(device, device_info.queue_families,
                                        n)) {
        continue;
      }

      if (kDeviceTypeScores[device_properties.deviceType] > device_score) {
        device_index = i;
        queue_index = static_cast<int>(n);
        device_score = kDeviceTypeScores[device_properties.deviceType];
        found = true;
        break;
      }
    }

    if (!found)
      continue;

    // If |gpu_info| was provided, use this device.
    if (gpu_info)
      break;

    // If the device is a discrete GPU, use it right away. Otherwise keep
    // scanning the remaining devices for the one with the highest score.
    if (device_properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU)
      break;
  }

  if (device_index == -1) {
    DLOG(ERROR) << "Cannot find a capable device.";
    return false;
  }

  const auto& physical_device_info = info.physical_devices[device_index];
  vk_physical_device_ = physical_device_info.device;
  vk_physical_device_properties_ = physical_device_info.properties;
  vk_physical_device_driver_properties_ =
      physical_device_info.driver_properties;
  vk_queue_index_ = queue_index;

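  // A single queue from the chosen family is enough for this class. Queue
  // priorities only order queues relative to each other, so with one queue the
  // value is effectively a don't-care.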
  float queue_priority = 0.0f;
  VkDeviceQueueCreateInfo queue_create_info = {};
  queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
  queue_create_info.queueFamilyIndex = queue_index;
  queue_create_info.queueCount = 1;
  queue_create_info.pQueuePriorities = &queue_priority;
  queue_create_info.flags =
      allow_protected_memory ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;

  std::vector<const char*> enabled_extensions;
  for (const char* extension : required_extensions) {
    const auto it =
        std::find_if(physical_device_info.extensions.begin(),
                     physical_device_info.extensions.end(),
                     [extension](const VkExtensionProperties& p) {
                       return std::strcmp(extension, p.extensionName) == 0;
                     });
    if (it == physical_device_info.extensions.end()) {
      // On Fuchsia, some device extensions are provided by layers.
      // TODO(penghuang): also check extensions against layer device
      // extensions.
#if !BUILDFLAG(IS_FUCHSIA)
      DLOG(ERROR) << "Required Vulkan extension " << extension
                  << " is not supported.";
      return false;
#endif
    }
    enabled_extensions.push_back(extension);
  }

  for (const char* extension : optional_extensions) {
    const auto it =
        std::find_if(physical_device_info.extensions.begin(),
                     physical_device_info.extensions.end(),
                     [extension](const VkExtensionProperties& p) {
                       return std::strcmp(extension, p.extensionName) == 0;
                     });
    if (it == physical_device_info.extensions.end()) {
      DLOG(ERROR) << "Optional Vulkan extension " << extension
                  << " is not supported.";
    } else {
      enabled_extensions.push_back(extension);
    }
  }

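  // Record the device identity in crash keys so GPU process crashes can be
  // triaged by API version, driver version, vendor, device id, type, and name.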
  crash_keys::vulkan_device_api_version.Set(
      VkVersionToString(vk_physical_device_properties_.apiVersion));
  if (vk_physical_device_properties_.vendorID == 0x10DE) {
    // NVIDIA
    // 10 bits = major version (up to r1023)
    // 8 bits = minor version (up to 255)
    // 8 bits = secondary branch version/build version (up to 255)
    // 6 bits = tertiary branch/build version (up to 63)
    auto version = vk_physical_device_properties_.driverVersion;
    uint32_t major = (version >> 22) & 0x3ff;
    uint32_t minor = (version >> 14) & 0x0ff;
    uint32_t secondary_branch = (version >> 6) & 0x0ff;
    uint32_t tertiary_branch = version & 0x003f;
    crash_keys::vulkan_device_driver_version.Set(base::StringPrintf(
        "%u.%u.%u.%u", major, minor, secondary_branch, tertiary_branch));
  } else {
    crash_keys::vulkan_device_driver_version.Set(
        VkVersionToString(vk_physical_device_properties_.driverVersion));
  }
  crash_keys::vulkan_device_vendor_id.Set(
      base::StringPrintf("0x%04x", vk_physical_device_properties_.vendorID));
  crash_keys::vulkan_device_id.Set(
      base::StringPrintf("0x%04x", vk_physical_device_properties_.deviceID));

  static const char* kDeviceTypeNames[] = {
      "other", "integrated", "discrete", "virtual", "cpu",
  };
  uint32_t gpu_type = vk_physical_device_properties_.deviceType;
  if (gpu_type >= std::size(kDeviceTypeNames))
    gpu_type = 0;
  crash_keys::vulkan_device_type.Set(kDeviceTypeNames[gpu_type]);
  crash_keys::vulkan_device_name.Set(vk_physical_device_properties_.deviceName);

  // Disable all physical device features by default.
  enabled_device_features_2_ = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2};

// Android, Fuchsia, and Linux (VaapiVideoDecoder) need YCbCr sampler support.
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX)
  if (!physical_device_info.feature_sampler_ycbcr_conversion) {
    LOG(ERROR) << "samplerYcbcrConversion is not supported.";
    return false;
  }
  sampler_ycbcr_conversion_features_ = {
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES};
  sampler_ycbcr_conversion_features_.samplerYcbcrConversion = VK_TRUE;

  // Add the VkPhysicalDeviceSamplerYcbcrConversionFeatures struct to the pNext
  // chain of VkPhysicalDeviceFeatures2 to enable YCbCr sampler support.
  sampler_ycbcr_conversion_features_.pNext = enabled_device_features_2_.pNext;
  enabled_device_features_2_.pNext = &sampler_ycbcr_conversion_features_;
#endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX)

  if (allow_protected_memory) {
    if (!physical_device_info.feature_protected_memory) {
      DLOG(ERROR) << "Protected memory is not supported.";
      return false;
    }
    protected_memory_features_ = {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES};
    protected_memory_features_.protectedMemory = VK_TRUE;

    // Add the VkPhysicalDeviceProtectedMemoryFeatures struct to the pNext
    // chain of VkPhysicalDeviceFeatures2 to enable protected memory support.
    protected_memory_features_.pNext = enabled_device_features_2_.pNext;
    enabled_device_features_2_.pNext = &protected_memory_features_;
  }

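  // Pass the chained feature structs via pNext, but the core feature set via
  // pEnabledFeatures. Vulkan forbids combining pEnabledFeatures with a
  // VkPhysicalDeviceFeatures2 in the pNext chain, which is why the chain head
  // below is enabled_device_features_2_.pNext rather than the struct itself.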
  VkDeviceCreateInfo device_create_info = {
      VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO};
  device_create_info.pNext = enabled_device_features_2_.pNext;
  device_create_info.queueCreateInfoCount = 1;
  device_create_info.pQueueCreateInfos = &queue_create_info;
  device_create_info.enabledExtensionCount =
      static_cast<uint32_t>(enabled_extensions.size());
  device_create_info.ppEnabledExtensionNames = enabled_extensions.data();
  device_create_info.pEnabledFeatures = &enabled_device_features_2_.features;

  result = vkCreateDevice(vk_physical_device_, &device_create_info, nullptr,
                          &owned_vk_device_);
  if (VK_SUCCESS != result) {
    DLOG(ERROR) << "vkCreateDevice failed. result:" << result;
    return false;
  }

  enabled_extensions_ = gfx::ExtensionSet(std::begin(enabled_extensions),
                                          std::end(enabled_extensions));

  if (!gpu::GetVulkanFunctionPointers()->BindDeviceFunctionPointers(
          owned_vk_device_, info.used_api_version, enabled_extensions_)) {
    vkDestroyDevice(owned_vk_device_, nullptr);
    owned_vk_device_ = VK_NULL_HANDLE;
    return false;
  }
  vk_device_ = owned_vk_device_;

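  // Per the Vulkan spec, a queue created with a non-zero flags field (here
  // VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT) cannot be fetched with plain
  // vkGetDeviceQueue(); vkGetDeviceQueue2() must be used with flags matching
  // the VkDeviceQueueCreateInfo above.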
  if (allow_protected_memory) {
    VkDeviceQueueInfo2 queue_info2 = {};
    queue_info2.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2;
    queue_info2.flags = VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT;
    queue_info2.queueFamilyIndex = queue_index;
    queue_info2.queueIndex = 0;
    vkGetDeviceQueue2(vk_device_, &queue_info2, &vk_queue_);
  } else {
    vkGetDeviceQueue(vk_device_, queue_index, 0, &vk_queue_);
  }

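  // A non-zero |heap_memory_limit| caps every memory heap at that many bytes;
  // VK_WHOLE_SIZE is VMA's sentinel for "no limit" on a heap.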
  std::vector<VkDeviceSize> heap_size_limit(
      VK_MAX_MEMORY_HEAPS,
      heap_memory_limit ? heap_memory_limit : VK_WHOLE_SIZE);
  vma::CreateAllocator(vk_physical_device_, vk_device_, vk_instance_,
                       heap_size_limit.data(), &vma_allocator_);
  cleanup_helper_ = std::make_unique<VulkanFenceHelper>(this);

  allow_protected_memory_ = allow_protected_memory;
  return true;
}

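// Shared tail of the externally-created-device paths (ANGLE, WebView, and the
// compositor gpu thread): adopt handles that this class does not own, then set
// up the VMA allocator and the fence helper. owned_vk_device_ stays
// VK_NULL_HANDLE, so Destroy() will not vkDestroyDevice() an adopted device.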
bool VulkanDeviceQueue::InitCommon(VkPhysicalDevice vk_physical_device,
                                   VkDevice vk_device,
                                   VkQueue vk_queue,
                                   uint32_t vk_queue_index,
                                   gfx::ExtensionSet enabled_extensions) {
  DCHECK_EQ(static_cast<VkPhysicalDevice>(VK_NULL_HANDLE), vk_physical_device_);
  DCHECK_EQ(static_cast<VkDevice>(VK_NULL_HANDLE), owned_vk_device_);
  DCHECK_EQ(static_cast<VkDevice>(VK_NULL_HANDLE), vk_device_);
  DCHECK_EQ(static_cast<VkQueue>(VK_NULL_HANDLE), vk_queue_);

  vk_physical_device_ = vk_physical_device;
  vk_device_ = vk_device;
  vk_queue_ = vk_queue;
  vk_queue_index_ = vk_queue_index;
  enabled_extensions_ = std::move(enabled_extensions);

  vma::CreateAllocator(vk_physical_device_, vk_device_, vk_instance_, nullptr,
                       &vma_allocator_);
  cleanup_helper_ = std::make_unique<VulkanFenceHelper>(this);
  return true;
}

bool VulkanDeviceQueue::InitializeFromANGLE() {
  const VulkanInfo& info = instance_->vulkan_info();

  VkPhysicalDevice vk_physical_device = gl::QueryVkPhysicalDeviceFromANGLE();
  if (vk_physical_device == VK_NULL_HANDLE)
    return false;

  int device_index = -1;
  for (size_t i = 0; i < info.physical_devices.size(); ++i) {
    if (info.physical_devices[i].device == vk_physical_device) {
      device_index = i;
      break;
    }
  }

  if (device_index == -1) {
    DLOG(ERROR) << "Cannot find the physical device matching ANGLE.";
    return false;
  }

  const auto& physical_device_info = info.physical_devices[device_index];
  vk_physical_device_properties_ = physical_device_info.properties;
  vk_physical_device_driver_properties_ =
      physical_device_info.driver_properties;

  VkDevice vk_device = gl::QueryVkDeviceFromANGLE();
  VkQueue vk_queue = gl::QueryVkQueueFromANGLE();
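  // Note: "Framiliy" is misspelled, but the name matches the declaration in
  // ui/gl/gl_angle_util_vulkan.h, so the call must stay in sync with that
  // header.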
  uint32_t vk_queue_index = gl::QueryVkQueueFramiliyIndexFromANGLE();
  auto enabled_extensions = gl::QueryVkDeviceExtensionsFromANGLE();

  if (!gpu::GetVulkanFunctionPointers()->BindDeviceFunctionPointers(
          vk_device, info.used_api_version, enabled_extensions)) {
    return false;
  }

  enabled_device_features_2_from_angle_ =
      gl::QueryVkEnabledDeviceFeaturesFromANGLE();
  if (!enabled_device_features_2_from_angle_)
    return false;

  return InitCommon(vk_physical_device, vk_device, vk_queue, vk_queue_index,
                    std::move(enabled_extensions));
}

bool VulkanDeviceQueue::InitializeForWebView(
    VkPhysicalDevice vk_physical_device,
    VkDevice vk_device,
    VkQueue vk_queue,
    uint32_t vk_queue_index,
    gfx::ExtensionSet enabled_extensions) {
  return InitCommon(vk_physical_device, vk_device, vk_queue, vk_queue_index,
                    std::move(enabled_extensions));
}

bool VulkanDeviceQueue::InitializeForCompositorGpuThread(
    VkPhysicalDevice vk_physical_device,
    VkDevice vk_device,
    VkQueue vk_queue,
    uint32_t vk_queue_index,
    gfx::ExtensionSet enabled_extensions,
    const VkPhysicalDeviceFeatures2& vk_physical_device_features2) {
  // Currently the VulkanDeviceQueue for the DrDc thread (aka
  // CompositorGpuThread) uses the same Vulkan queue as the gpu main thread.
  // Since both the gpu main and DrDc threads access and submit work to the
  // same queue, all queue access must be made thread safe. This is done with a
  // per-|vk_queue| lock. Note that any previous lock for the queue is
  // intentionally overwritten.
  //
  // The map itself is also accessed by multiple gpu threads, so its accesses
  // must be thread safe too. Locks are created and written into the map only
  // when the DrDc thread is initialized, which happens during GpuServiceImpl
  // init; no gpu thread performs read access until GpuServiceImpl init
  // completes, hence it is safe to write to the map here.
  GetVulkanFunctionPointers()->per_queue_lock_map[vk_queue] =
      std::make_unique<base::Lock>();

  enabled_device_features_2_ = vk_physical_device_features2;
  return InitCommon(vk_physical_device, vk_device, vk_queue, vk_queue_index,
                    std::move(enabled_extensions));
}

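// Destroy() releases everything the Initialize* paths created. Only a device
// created by Initialize() itself (owned_vk_device_) is destroyed with
// vkDestroyDevice(); devices adopted via InitCommon() are owned elsewhere.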
void VulkanDeviceQueue::Destroy() {
  if (cleanup_helper_) {
    cleanup_helper_->Destroy();
    cleanup_helper_.reset();
  }

  if (vma_allocator_ != VK_NULL_HANDLE) {
    vma::DestroyAllocator(vma_allocator_);
    vma_allocator_ = VK_NULL_HANDLE;
  }

  if (VK_NULL_HANDLE != owned_vk_device_) {
    vkDestroyDevice(owned_vk_device_, nullptr);
    owned_vk_device_ = VK_NULL_HANDLE;

    // Clear all entries from the per-queue lock map, since the device, and
    // hence every queue obtained from it (along with each queue's
    // corresponding lock), is destroyed.
    //
    // This happens when the VulkanDeviceQueue is destroyed on the gpu main
    // thread during GpuServiceImpl destruction, which occurs after the
    // CompositorGpuThread is destroyed. The CompositorGpuThread therefore
    // cannot be accessing the map at this point, and it is thread safe to
    // delete map entries here.
    GetVulkanFunctionPointers()->per_queue_lock_map.clear();
  }

  vk_device_ = VK_NULL_HANDLE;
  vk_queue_ = VK_NULL_HANDLE;
  vk_queue_index_ = 0;
  vk_physical_device_ = VK_NULL_HANDLE;
}

std::unique_ptr<VulkanCommandPool> VulkanDeviceQueue::CreateCommandPool() {
  std::unique_ptr<VulkanCommandPool> command_pool(new VulkanCommandPool(this));
  if (!command_pool->Initialize())
    return nullptr;
  return command_pool;
}

}  // namespace gpu