// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_CONFIG_GPU_INFO_H_
#define GPU_CONFIG_GPU_INFO_H_

// Provides access to the GPU information for the system
// on which chrome is currently running.

#include <stdint.h>

#include <string>
#include <vector>

#include "base/containers/flat_map.h"
#include "base/containers/span.h"
#include "base/time/time.h"
#include "base/version.h"
#include "build/build_config.h"
#include "gpu/config/dx_diag_node.h"
#include "gpu/gpu_export.h"
#include "gpu/vulkan/buildflags.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gpu_preference.h"

#if BUILDFLAG(IS_WIN)
#include <dxgi.h>

#include "base/win/windows_types.h"
#endif

#if BUILDFLAG(ENABLE_VULKAN)
#include "gpu/config/vulkan_info.h"
#endif
namespace gpu {

// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
// This should match enum IntelGpuSeriesType in
// \tools\metrics\histograms\enums.xml
enum class IntelGpuSeriesType {
  kUnknown = 0,
  // Intel 4th gen
  kBroadwater = 16,
  kEaglelake = 17,
  // Intel 5th gen
  kIronlake = 18,
  // Intel 6th gen
  kSandybridge = 1,
  // Intel 7th gen
  kBaytrail = 2,
  kIvybridge = 3,
  kHaswell = 4,
  // Intel 8th gen
  kCherrytrail = 5,
  kBroadwell = 6,
  // Intel 9th gen
  kApollolake = 7,
  kSkylake = 8,
  kGeminilake = 9,
  kAmberlake = 23,
  kKabylake = 10,
  kCoffeelake = 11,
  kWhiskeylake = 12,
  kCometlake = 13,
  // Intel 10th gen
  kCannonlake = 14,
  // Intel 11th gen
  kIcelake = 15,
  kElkhartlake = 19,
  kJasperlake = 20,
  // Intel 12th gen
  kTigerlake = 21,
  kRocketlake = 24,
  kDG1 = 25,
  kAlderlake = 22,
  kAlchemist = 26,
  // Please also update |gpu_series_map| in process_json.py.
  kMaxValue = kAlchemist,
};
// Video profile. This *must* match media::VideoCodecProfile.
enum VideoCodecProfile {
  VIDEO_CODEC_PROFILE_UNKNOWN = -1,
  VIDEO_CODEC_PROFILE_MIN = VIDEO_CODEC_PROFILE_UNKNOWN,
  H264PROFILE_BASELINE = 0,
  H264PROFILE_MAIN,
  H264PROFILE_EXTENDED,
  H264PROFILE_HIGH,
  H264PROFILE_HIGH10PROFILE,
  H264PROFILE_HIGH422PROFILE,
  H264PROFILE_HIGH444PREDICTIVEPROFILE,
  H264PROFILE_SCALABLEBASELINE,
  H264PROFILE_SCALABLEHIGH,
  H264PROFILE_STEREOHIGH,
  H264PROFILE_MULTIVIEWHIGH,
  VP8PROFILE_ANY,
  VP9PROFILE_PROFILE0,
  VP9PROFILE_PROFILE1,
  VP9PROFILE_PROFILE2,
  VP9PROFILE_PROFILE3,
  HEVCPROFILE_MAIN,
  HEVCPROFILE_MAIN10,
  HEVCPROFILE_MAIN_STILL_PICTURE,
  DOLBYVISION_PROFILE0,
  DOLBYVISION_PROFILE4,
  DOLBYVISION_PROFILE5,
  DOLBYVISION_PROFILE7,
  THEORAPROFILE_ANY,
  AV1PROFILE_PROFILE_MAIN,
  AV1PROFILE_PROFILE_HIGH,
  AV1PROFILE_PROFILE_PRO,
  DOLBYVISION_PROFILE8,
  DOLBYVISION_PROFILE9,
  HEVCPROFILE_REXT,
  HEVCPROFILE_HIGH_THROUGHPUT,
  HEVCPROFILE_MULTIVIEW_MAIN,
  HEVCPROFILE_SCALABLE_MAIN,
  HEVCPROFILE_3D_MAIN,
  HEVCPROFILE_SCREEN_EXTENDED,
  HEVCPROFILE_SCALABLE_REXT,
  HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED,
  VIDEO_CODEC_PROFILE_MAX = HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED,
};

// Specification of a decoding profile supported by a hardware decoder.
struct GPU_EXPORT VideoDecodeAcceleratorSupportedProfile {
  VideoCodecProfile profile;
  gfx::Size max_resolution;
  gfx::Size min_resolution;
  bool encrypted_only;
};

using VideoDecodeAcceleratorSupportedProfiles =
    std::vector<VideoDecodeAcceleratorSupportedProfile>;
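
// Illustrative sketch (the values below are assumptions for the example, not
// taken from any real decoder): a backend advertising H.264 Main support up
// to 4K might fill in one of these profiles as follows.
//
//   VideoDecodeAcceleratorSupportedProfile h264_profile;
//   h264_profile.profile = H264PROFILE_MAIN;
//   h264_profile.min_resolution = gfx::Size(16, 16);      // assumed minimum
//   h264_profile.max_resolution = gfx::Size(3840, 2160);  // assumed maximum
//   h264_profile.encrypted_only = false;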

struct GPU_EXPORT VideoDecodeAcceleratorCapabilities {
  VideoDecodeAcceleratorCapabilities();
  VideoDecodeAcceleratorCapabilities(
      const VideoDecodeAcceleratorCapabilities& other);
  ~VideoDecodeAcceleratorCapabilities();
  VideoDecodeAcceleratorSupportedProfiles supported_profiles;
  uint32_t flags;
};

// Specification of an encoding profile supported by a hardware encoder.
struct GPU_EXPORT VideoEncodeAcceleratorSupportedProfile {
  VideoCodecProfile profile;
  gfx::Size min_resolution;
  gfx::Size max_resolution;
  uint32_t max_framerate_numerator;
  uint32_t max_framerate_denominator;
};

using VideoEncodeAcceleratorSupportedProfiles =
    std::vector<VideoEncodeAcceleratorSupportedProfile>;
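
// Illustrative sketch (assumed values, not from the Chromium source): the
// maximum framerate is expressed as a numerator/denominator pair, so an
// encoder capped at 30 fps for VP9 profile 0 up to 1080p could be described
// as:
//
//   VideoEncodeAcceleratorSupportedProfile vp9_encode;
//   vp9_encode.profile = VP9PROFILE_PROFILE0;
//   vp9_encode.min_resolution = gfx::Size(32, 32);  // assumed minimum
//   vp9_encode.max_resolution = gfx::Size(1920, 1080);
//   vp9_encode.max_framerate_numerator = 30;
//   vp9_encode.max_framerate_denominator = 1;       // i.e. 30/1 fps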

enum class ImageDecodeAcceleratorType {
  kUnknown = 0,
  kJpeg = 1,
  kWebP = 2,
  kMaxValue = kWebP,
};

enum class ImageDecodeAcceleratorSubsampling {
  k420 = 0,
  k422 = 1,
  k444 = 2,
  kMaxValue = k444,
};

// Specification of an image decoding profile supported by a hardware decoder.
struct GPU_EXPORT ImageDecodeAcceleratorSupportedProfile {
  ImageDecodeAcceleratorSupportedProfile();
  ImageDecodeAcceleratorSupportedProfile(
      const ImageDecodeAcceleratorSupportedProfile& other);
  ImageDecodeAcceleratorSupportedProfile(
      ImageDecodeAcceleratorSupportedProfile&& other);
  ~ImageDecodeAcceleratorSupportedProfile();
  ImageDecodeAcceleratorSupportedProfile& operator=(
      const ImageDecodeAcceleratorSupportedProfile& other);
  ImageDecodeAcceleratorSupportedProfile& operator=(
      ImageDecodeAcceleratorSupportedProfile&& other);

  // Fields common to all image types.
  // Type of image to which this profile applies, e.g., JPEG.
  ImageDecodeAcceleratorType image_type;
  // Minimum and maximum supported pixel dimensions of the encoded image.
  gfx::Size min_encoded_dimensions;
  gfx::Size max_encoded_dimensions;

  // Fields specific to |image_type| == kJpeg.
  // The supported chroma subsampling formats, e.g. 4:2:0.
  std::vector<ImageDecodeAcceleratorSubsampling> subsamplings;
};

using ImageDecodeAcceleratorSupportedProfiles =
    std::vector<ImageDecodeAcceleratorSupportedProfile>;
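
// Illustrative sketch (assumed values): a hardware JPEG decoder accepting
// 4:2:0 and 4:4:4 images between 64x64 and 8192x8192 could be reported as:
//
//   ImageDecodeAcceleratorSupportedProfile jpeg_profile;
//   jpeg_profile.image_type = ImageDecodeAcceleratorType::kJpeg;
//   jpeg_profile.min_encoded_dimensions = gfx::Size(64, 64);
//   jpeg_profile.max_encoded_dimensions = gfx::Size(8192, 8192);
//   jpeg_profile.subsamplings = {ImageDecodeAcceleratorSubsampling::k420,
//                                ImageDecodeAcceleratorSubsampling::k444};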

#if BUILDFLAG(IS_WIN)
enum class OverlaySupport {
  kNone = 0,
  kDirect = 1,
  kScaling = 2,
  kSoftware = 3
};

GPU_EXPORT const char* OverlaySupportToString(OverlaySupport support);

struct GPU_EXPORT OverlayInfo {
  OverlayInfo& operator=(const OverlayInfo& other) = default;
  bool operator==(const OverlayInfo& other) const {
    return direct_composition == other.direct_composition &&
           supports_overlays == other.supports_overlays &&
           yuy2_overlay_support == other.yuy2_overlay_support &&
           nv12_overlay_support == other.nv12_overlay_support &&
           bgra8_overlay_support == other.bgra8_overlay_support &&
           rgb10a2_overlay_support == other.rgb10a2_overlay_support;
  }
  bool operator!=(const OverlayInfo& other) const { return !(*this == other); }

  // True if we use direct composition surface on Windows.
  bool direct_composition = false;

  // True if we use direct composition surface overlays on Windows.
  bool supports_overlays = false;
  OverlaySupport yuy2_overlay_support = OverlaySupport::kNone;
  OverlaySupport nv12_overlay_support = OverlaySupport::kNone;
  OverlaySupport bgra8_overlay_support = OverlaySupport::kNone;
  OverlaySupport rgb10a2_overlay_support = OverlaySupport::kNone;
};
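
// Illustrative sketch (the `gpu_info` variable is an assumption for the
// example, not part of this header): diagnostics code can render the
// per-format overlay support of a GPUInfo (declared below) via
// OverlaySupportToString(), e.g.
//
//   const OverlayInfo& info = gpu_info.overlay_info;
//   LOG(INFO) << "NV12 overlays: "
//             << OverlaySupportToString(info.nv12_overlay_support)
//             << ", supports overlays: " << info.supports_overlays;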
#endif  // BUILDFLAG(IS_WIN)

#if BUILDFLAG(IS_MAC)
GPU_EXPORT bool ValidateMacOSSpecificTextureTarget(int target);
#endif  // BUILDFLAG(IS_MAC)

struct GPU_EXPORT GPUInfo {
  struct GPU_EXPORT GPUDevice {
    GPUDevice();
    GPUDevice(const GPUDevice& other);
    GPUDevice(GPUDevice&& other) noexcept;
    ~GPUDevice() noexcept;
    GPUDevice& operator=(const GPUDevice& other);
    GPUDevice& operator=(GPUDevice&& other) noexcept;

    bool IsSoftwareRenderer() const;

    // The DWORD (uint32_t) representing the graphics card vendor id.
    uint32_t vendor_id = 0u;

    // The DWORD (uint32_t) representing the graphics card device id.
    // Device ids are only unique within a vendor, not across vendors.
    uint32_t device_id = 0u;

#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_CHROMEOS)
    // The graphics card revision number.
    uint32_t revision = 0u;
#endif

#if BUILDFLAG(IS_WIN)
    // The graphics card subsystem id.
    // The lower 16 bits represent the subsystem vendor id.
    uint32_t sub_sys_id = 0u;

    // The graphics card LUID. This is a unique identifier for the graphics
    // card that is guaranteed to be unique until the computer is restarted.
    // The LUID is used instead of the vendor id and device id because the
    // device id is only unique relative to its vendor. If there is more than
    // one of the exact same graphics card installed, they all have the same
    // vendor id and device id but different LUIDs.
    CHROME_LUID luid;
#endif  // BUILDFLAG(IS_WIN)

#if BUILDFLAG(IS_MAC)
    // The registry ID of an IOGraphicsAccelerator2 or AGXAccelerator matches
    // the ID used for GPU selection by ANGLE_platform_angle_device_id.
    uint64_t register_id = 0ULL;
#endif  // BUILDFLAG(IS_MAC)

    // Whether this GPU is the currently used one.
    // Currently this field is only supported and meaningful on OS X and on
    // Windows using ANGLE with D3D11.
    bool active = false;

    // The strings that describe the GPU.
    // On Linux these strings are obtained through libpci.
    // On Windows/MacOSX, these two strings are not filled in at the moment.
    // On Android, these are respectively GL_VENDOR and GL_RENDERER.
    std::string vendor_string;
    std::string device_string;
    std::string driver_vendor;
    std::string driver_version;

    // NVIDIA CUDA compute capability, major version. 0 if undetermined. Can
    // be used to determine the hardware generation that the GPU belongs to.
    int cuda_compute_capability_major = 0;

    // Whether this device is identified as the high performance or the low
    // power GPU.
    gl::GpuPreference gpu_preference = gl::GpuPreference::kNone;
  };

  GPUInfo();
  GPUInfo(const GPUInfo& other);
  ~GPUInfo();

  // The currently active gpu.
  GPUDevice& active_gpu();
  const GPUDevice& active_gpu() const;

  bool IsInitialized() const;

  bool UsesSwiftShader() const;

  unsigned int GpuCount() const;

  const GPUDevice* GetGpuByPreference(gl::GpuPreference preference) const;

#if BUILDFLAG(IS_WIN)
  GPUDevice* FindGpuByLuid(DWORD low_part, LONG high_part);
#endif  // BUILDFLAG(IS_WIN)
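
  // Illustrative sketch (the `gpu_info` variable is an assumption for the
  // example, not part of this header): callers typically read the active
  // device or ask for a device with a specific preference, e.g.
  //
  //   const GPUInfo::GPUDevice& device = gpu_info.active_gpu();
  //   const GPUInfo::GPUDevice* high_perf =
  //       gpu_info.GetGpuByPreference(gl::GpuPreference::kHighPerformance);
  //   if (high_perf && !high_perf->IsSoftwareRenderer()) {
  //     // Use |high_perf->vendor_id| / |high_perf->device_id| for lookups.
  //   }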

  // The amount of time taken to get from the process starting to the message
  // loop being pumped.
  base::TimeDelta initialization_time;

  // Computer has NVIDIA Optimus.
  bool optimus;

  // Computer has AMD Dynamic Switchable Graphics.
  bool amd_switchable;

  // Primary GPU, for example, the discrete GPU in a dual GPU machine.
  GPUDevice gpu;

  // Secondary GPUs, for example, the integrated GPU in a dual GPU machine.
  std::vector<GPUDevice> secondary_gpus;

  // The version of the pixel/fragment shader used by the gpu.
  std::string pixel_shader_version;

  // The version of the vertex shader used by the gpu.
  std::string vertex_shader_version;

  // The maximum multisampling sample count, either through ES3 or
  // EXT_multisampled_render_to_texture MSAA.
  std::string max_msaa_samples;

  // The machine model identifier. It can contain any character, including
  // whitespace. Currently it is supported on MacOSX and Android.
  // Android examples: "Nexus 5", "XT1032".
  // On MacOSX, the version is stripped out of the model identifier, for
  // example, the original identifier is "MacBookPro7,2", and we put
  // "MacBookPro" as machine_model_name, and "7.2" as machine_model_version.
  std::string machine_model_name;

  // The version of the machine model. Currently it is supported on MacOSX.
  // See machine_model_name's comment.
  std::string machine_model_version;
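
  // Illustrative sketch of the split described above (not the actual
  // implementation; the `identifier` variable is hypothetical): find the
  // first digit and replace the comma in the remainder with a dot, so
  // "MacBookPro7,2" becomes name "MacBookPro" and version "7.2".
  //
  //   size_t pos = identifier.find_first_of("0123456789");
  //   std::string name = identifier.substr(0, pos);
  //   std::string version = identifier.substr(pos);
  //   std::replace(version.begin(), version.end(), ',', '.');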

  // The GL_VERSION string.
  std::string gl_version;

  // The GL_VENDOR string.
  std::string gl_vendor;

  // The GL_RENDERER string.
  std::string gl_renderer;

  // The GL_EXTENSIONS string.
  std::string gl_extensions;

  // GL window system binding vendor. "" if not available.
  std::string gl_ws_vendor;

  // GL window system binding version. "" if not available.
  std::string gl_ws_version;

  // GL window system binding extensions. "" if not available.
  std::string gl_ws_extensions;

  // GL reset notification strategy as defined by GL_ARB_robustness. 0 if GPU
  // reset detection or notification is not available.
  uint32_t gl_reset_notification_strategy;

  bool software_rendering;

  // Empty means unknown. Defined on X11 as
  // - "1" means indirect (versions can't be all zero)
  // - "2" means some type of direct rendering, but the version cannot be
  //   reliably determined
  // - "2.1", "2.2", "2.3" for DRI, DRI2, DRI3 respectively
  std::string direct_rendering_version;

  // Whether the gpu process is running in a sandbox.
  bool sandboxed;

  // True if the GPU is running in the browser process instead of its own.
  bool in_process_gpu;

  // True if the GPU process is using the passthrough command decoder.
  bool passthrough_cmd_decoder;

  // True only on Android when extensions for threaded mailbox sharing are
  // present. Threaded mailbox sharing is used on Android only, so this check
  // is only implemented on Android.
  bool can_support_threaded_texture_mailbox = false;

  // Whether the browser was built with ASAN or not.
#if defined(ADDRESS_SANITIZER)
  bool is_asan = true;
#else
  bool is_asan = false;
#endif

#if BUILDFLAG(IS_MAC)
  // Which texture target is used for native GpuMemoryBuffers on MacOS.
  // Valid values are GL_TEXTURE_2D and GL_TEXTURE_RECTANGLE_ARB.
  uint32_t macos_specific_texture_target;
#endif  // BUILDFLAG(IS_MAC)

#if BUILDFLAG(IS_WIN)
  // The information returned by the DirectX Diagnostics Tool.
  DxDiagNode dx_diagnostics;

  // The supported D3D feature level in the gpu driver.
  uint32_t d3d12_feature_level = 0;

  // The supported Vulkan API version in the gpu driver.
  uint32_t vulkan_version = 0;

  // The GPU hardware overlay info.
  OverlayInfo overlay_info;
#endif

  VideoDecodeAcceleratorSupportedProfiles
      video_decode_accelerator_supported_profiles;

  // DO NOT use for anything but diagnostics/metrics like chrome://gpu;
  // it is not populated at startup and can be unreliable for a while.
  VideoEncodeAcceleratorSupportedProfiles
      video_encode_accelerator_supported_profiles;

  bool jpeg_decode_accelerator_supported;

  ImageDecodeAcceleratorSupportedProfiles
      image_decode_accelerator_supported_profiles;

  bool subpixel_font_rendering;

  uint32_t visibility_callback_call_count = 0;

#if BUILDFLAG(ENABLE_VULKAN)
  absl::optional<VulkanInfo> vulkan_info;
#endif

  // Note: when adding new members, please remember to update EnumerateFields
  // in gpu_info.cc.

  // In conjunction with EnumerateFields, this allows the embedder to
  // enumerate the values in this structure without having to embed
  // references to its specific member variables. This simplifies the
  // addition of new fields to this type. (An illustrative sketch follows the
  // EnumerateFields declaration below.)
  class Enumerator {
   public:
    // The following methods apply to the "current" object. Initially this
    // is the root object, but calls to BeginGPUDevice/EndGPUDevice and
    // BeginAuxAttributes/EndAuxAttributes change the object to which these
    // calls should apply.
    virtual void AddInt64(const char* name, int64_t value) = 0;
    virtual void AddInt(const char* name, int value) = 0;
    virtual void AddString(const char* name, const std::string& value) = 0;
    virtual void AddBool(const char* name, bool value) = 0;
    virtual void AddTimeDeltaInSecondsF(const char* name,
                                        const base::TimeDelta& value) = 0;
    virtual void AddBinary(const char* name,
                           const base::span<const uint8_t>& blob) = 0;

    // Markers indicating that a GPUDevice is being described.
    virtual void BeginGPUDevice() = 0;
    virtual void EndGPUDevice() = 0;

    // Markers indicating that a VideoDecodeAcceleratorSupportedProfile is
    // being described.
    virtual void BeginVideoDecodeAcceleratorSupportedProfile() = 0;
    virtual void EndVideoDecodeAcceleratorSupportedProfile() = 0;

    // Markers indicating that a VideoEncodeAcceleratorSupportedProfile is
    // being described.
    virtual void BeginVideoEncodeAcceleratorSupportedProfile() = 0;
    virtual void EndVideoEncodeAcceleratorSupportedProfile() = 0;

    // Markers indicating that an ImageDecodeAcceleratorSupportedProfile is
    // being described.
    virtual void BeginImageDecodeAcceleratorSupportedProfile() = 0;
    virtual void EndImageDecodeAcceleratorSupportedProfile() = 0;

    // Markers indicating that "auxiliary" attributes of the GPUInfo
    // (according to the DevTools protocol) are being described.
    virtual void BeginAuxAttributes() = 0;
    virtual void EndAuxAttributes() = 0;

    virtual void BeginOverlayInfo() = 0;
    virtual void EndOverlayInfo() = 0;

   protected:
    virtual ~Enumerator() = default;
  };

  // Outputs the fields in this structure to the provided enumerator.
  void EnumerateFields(Enumerator* enumerator) const;
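
  // Illustrative sketch (the NameLoggingEnumerator class is hypothetical, not
  // part of Chromium): an embedder can walk every field by implementing the
  // Enumerator interface and passing it to EnumerateFields, e.g.
  //
  //   class NameLoggingEnumerator : public GPUInfo::Enumerator {
  //    public:
  //     void AddString(const char* name, const std::string& value) override {
  //       VLOG(1) << name << " = " << value;
  //     }
  //     // ... implement the remaining Add*/Begin*/End* methods similarly.
  //   };
  //
  //   NameLoggingEnumerator enumerator;
  //   gpu_info.EnumerateFields(&enumerator);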
};

}  // namespace gpu

#endif  // GPU_CONFIG_GPU_INFO_H_