gpu_host_impl.cc

// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/viz/host/gpu_host_impl.h"

#include <utility>

#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/feature_list.h"
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/process/process_handle.h"
#include "base/strings/strcat.h"
#include "base/threading/thread_checker.h"
#include "base/trace_event/trace_event.h"
#include "base/values.h"
#include "build/build_config.h"
#include "components/viz/common/buildflags.h"
#include "components/viz/common/features.h"
#include "gpu/config/gpu_driver_bug_workaround_type.h"
#include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_finch_features.h"
#include "gpu/config/gpu_info.h"
#include "gpu/ipc/common/gpu_client_ids.h"
#include "gpu/ipc/host/gpu_disk_cache.h"
#include "mojo/public/cpp/bindings/sync_call_restrictions.h"
#include "ui/gfx/font_render_params.h"

#if BUILDFLAG(IS_ANDROID)
#include "base/android/build_info.h"
#endif

#if BUILDFLAG(IS_WIN)
#include "ui/gfx/win/rendering_window_manager.h"
#elif BUILDFLAG(IS_MAC)
#include "ui/accelerated_widget_mac/window_resize_helper_mac.h"
#endif

#if defined(USE_OZONE)
#include "ui/ozone/public/gpu_platform_support_host.h"
#include "ui/ozone/public/ozone_platform.h"
#endif

namespace viz {
namespace {

// A wrapper around gfx::FontRenderParams that checks it is set and accessed on
// the same thread.
class FontRenderParams {
 public:
  FontRenderParams(const FontRenderParams&) = delete;
  FontRenderParams& operator=(const FontRenderParams&) = delete;

  void Set(const gfx::FontRenderParams& params);
  void Reset();
  const absl::optional<gfx::FontRenderParams>& Get();

 private:
  friend class base::NoDestructor<FontRenderParams>;

  FontRenderParams();
  ~FontRenderParams();

  THREAD_CHECKER(thread_checker_);
  absl::optional<gfx::FontRenderParams> params_;
};

void FontRenderParams::Set(const gfx::FontRenderParams& params) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  params_ = params;
}

void FontRenderParams::Reset() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  params_ = absl::nullopt;
}

const absl::optional<gfx::FontRenderParams>& FontRenderParams::Get() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  return params_;
}

FontRenderParams::FontRenderParams() = default;

FontRenderParams::~FontRenderParams() {
  NOTREACHED();
}
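
// Note: the FontRenderParams instance handed out below is created via
// base::NoDestructor and is intentionally never destroyed, which is why the
// destructor above is NOTREACHED().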

FontRenderParams& GetFontRenderParams() {
  static base::NoDestructor<FontRenderParams> instance;
  return *instance;
}

}  // namespace

GpuHostImpl::InitParams::InitParams() = default;

GpuHostImpl::InitParams::InitParams(InitParams&&) = default;

GpuHostImpl::InitParams::~InitParams() = default;

GpuHostImpl::GpuHostImpl(Delegate* delegate,
                         mojo::PendingRemote<mojom::VizMain> viz_main,
                         InitParams params)
    : delegate_(delegate),
      viz_main_(std::move(viz_main)),
      params_(std::move(params)) {
  // Create a special GPU info collection service if the GPU process is used
  // for info collection only.
#if BUILDFLAG(IS_WIN)
  if (params_.info_collection_gpu_process) {
    viz_main_->CreateInfoCollectionGpuService(
        info_collection_gpu_service_remote_.BindNewPipeAndPassReceiver());
    return;
  }
#endif

  DCHECK(delegate_);

  mojo::PendingRemote<discardable_memory::mojom::DiscardableSharedMemoryManager>
      discardable_manager_remote;
  delegate_->BindDiscardableMemoryReceiver(
      discardable_manager_remote.InitWithNewPipeAndPassReceiver());

  DCHECK(GetFontRenderParams().Get());

  scoped_refptr<base::SequencedTaskRunner> task_runner = nullptr;
#if BUILDFLAG(IS_MAC)
  if (params_.main_thread_task_runner->BelongsToCurrentThread())
    task_runner = ui::WindowResizeHelperMac::Get()->task_runner();
#endif

#if BUILDFLAG(IS_ANDROID)
  viz_main_->SetHostProcessId(base::GetCurrentProcId());
#endif

  viz_main_->CreateGpuService(
      gpu_service_remote_.BindNewPipeAndPassReceiver(task_runner),
      gpu_host_receiver_.BindNewPipeAndPassRemote(task_runner),
      std::move(discardable_manager_remote), activity_flags_.CloneRegion(),
      GetFontRenderParams().Get()->subpixel_rendering);

#if defined(USE_OZONE)
  InitOzone();
#endif  // defined(USE_OZONE)
}
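
// Note: when the info-collection-only branch in the constructor above returns
// early (Windows), only info_collection_gpu_service_remote_ is bound;
// gpu_service_remote_ and gpu_host_receiver_ remain unbound for that process.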

GpuHostImpl::~GpuHostImpl() {
  SendOutstandingReplies();
}

// static
void GpuHostImpl::InitFontRenderParams(const gfx::FontRenderParams& params) {
  DCHECK(!GetFontRenderParams().Get());
  GetFontRenderParams().Set(params);
}

// static
void GpuHostImpl::ResetFontRenderParams() {
  DCHECK(GetFontRenderParams().Get());
  GetFontRenderParams().Reset();
}

void GpuHostImpl::SetProcessId(base::ProcessId pid) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_EQ(base::kNullProcessId, pid_);
  DCHECK_NE(base::kNullProcessId, pid);
  pid_ = pid;
}

void GpuHostImpl::OnProcessCrashed() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  // If the GPU process crashed while compiling a shader, we may have invalid
  // cached binaries. Completely clear the shader cache to force shader
  // binaries to be re-created.
  if (activity_flags_.IsFlagSet(
          gpu::ActivityFlagsBase::FLAG_LOADING_PROGRAM_BINARY)) {
    auto* gpu_disk_cache_factory = delegate_->GetGpuDiskCacheFactory();
    for (auto& [_, cache] : client_id_to_caches_) {
      // This call will temporarily extend the lifetime of the cache (kept
      // alive in the factory), and may drop loads of cached shader binaries
      // if it takes a while to complete. As we are intentionally dropping
      // all binaries, this behavior is fine.
      gpu_disk_cache_factory->ClearByCache(
          cache, base::Time(), base::Time::Max(), base::DoNothing());
    }
  }
}

void GpuHostImpl::AddConnectionErrorHandler(base::OnceClosure handler) {
  connection_error_handlers_.push_back(std::move(handler));
}

void GpuHostImpl::BlockLiveOffscreenContexts() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  std::set<GURL> urls(urls_with_live_offscreen_contexts_.begin(),
                      urls_with_live_offscreen_contexts_.end());
  delegate_->BlockDomainsFrom3DAPIs(urls, gpu::DomainGuilt::kUnknown);
}

void GpuHostImpl::ConnectFrameSinkManager(
    mojo::PendingReceiver<mojom::FrameSinkManager> receiver,
    mojo::PendingRemote<mojom::FrameSinkManagerClient> client,
    const DebugRendererSettings& debug_renderer_settings) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  TRACE_EVENT0("gpu", "GpuHostImpl::ConnectFrameSinkManager");

  mojom::FrameSinkManagerParamsPtr params =
      mojom::FrameSinkManagerParams::New();
  params->restart_id = params_.restart_id;
  params->use_activation_deadline =
      params_.deadline_to_synchronize_surfaces.has_value();
  params->activation_deadline_in_frames =
      params_.deadline_to_synchronize_surfaces.value_or(0u);
  params->frame_sink_manager = std::move(receiver);
  params->frame_sink_manager_client = std::move(client);
  params->debug_renderer_settings = debug_renderer_settings;
  viz_main_->CreateFrameSinkManager(std::move(params));
}

void GpuHostImpl::EstablishGpuChannel(int client_id,
                                      uint64_t client_tracing_id,
                                      bool is_gpu_host,
                                      bool sync,
                                      EstablishChannelCallback callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  TRACE_EVENT0("gpu", "GpuHostImpl::EstablishGpuChannel");

  shutdown_timeout_.Stop();

  // If GPU features are already blocklisted, no need to establish the channel.
  if (!delegate_->GpuAccessAllowed()) {
    DVLOG(1) << "GPU access blocked, refusing to open a GPU channel.";
    std::move(callback).Run(mojo::ScopedMessagePipeHandle(), gpu::GPUInfo(),
                            gpu::GpuFeatureInfo(),
                            EstablishChannelStatus::kGpuAccessDenied);
    return;
  }

  if (gpu::IsReservedClientId(client_id)) {
    // The display-compositor/GrShaderCache in the gpu process uses these
    // special client ids.
    std::move(callback).Run(mojo::ScopedMessagePipeHandle(), gpu::GPUInfo(),
                            gpu::GpuFeatureInfo(),
                            EstablishChannelStatus::kGpuAccessDenied);
    return;
  }

  channel_requests_[client_id] = std::move(callback);
  if (sync) {
    mojo::ScopedMessagePipeHandle channel_handle;
    gpu::GPUInfo gpu_info;
    gpu::GpuFeatureInfo gpu_feature_info;
    {
      mojo::SyncCallRestrictions::ScopedAllowSyncCall scoped_allow;
      gpu_service_remote_->EstablishGpuChannel(client_id, client_tracing_id,
                                               is_gpu_host, &channel_handle,
                                               &gpu_info, &gpu_feature_info);
    }
    OnChannelEstablished(client_id, true, std::move(channel_handle), gpu_info,
                         gpu_feature_info);
  } else {
    gpu_service_remote_->EstablishGpuChannel(
        client_id, client_tracing_id, is_gpu_host,
        base::BindOnce(&GpuHostImpl::OnChannelEstablished,
                       weak_ptr_factory_.GetWeakPtr(), client_id, false));
  }

  // The gpu host channel uses the same cache as the compositor client.
  if (is_gpu_host) {
    SetChannelDiskCacheHandle(client_id,
                              gpu::kDisplayCompositorGpuDiskCacheHandle);
  }
}
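
// Example (illustrative only, not part of the original file): a typical async
// request from the host side looks roughly like
//   gpu_host->EstablishGpuChannel(client_id, client_tracing_id,
//                                 /*is_gpu_host=*/false, /*sync=*/false,
//                                 base::BindOnce(&OnChannelReady));
// where OnChannelReady is a hypothetical caller-provided callback. The reply
// is routed through OnChannelEstablished() further below before the callback
// runs.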

void GpuHostImpl::SetChannelClientPid(int client_id,
                                      base::ProcessId client_pid) {
  gpu_service_remote_->SetChannelClientPid(client_id, client_pid);
}

void GpuHostImpl::SetChannelDiskCacheHandle(
    int client_id,
    const gpu::GpuDiskCacheHandle& handle) {
  if (params_.disable_gpu_shader_disk_cache) {
    return;
  }

  scoped_refptr<gpu::GpuDiskCache> cache =
      delegate_->GetGpuDiskCacheFactory()->Get(handle);
  if (!cache) {
    // Create the cache if necessary and save a reference.
    cache = delegate_->GetGpuDiskCacheFactory()->Create(
        handle,
        base::BindRepeating(&GpuHostImpl::LoadedBlob,
                            weak_ptr_factory_.GetWeakPtr()),
        base::BindOnce(&GpuHostImpl::OnDiskCacheHandleDestoyed,
                       weak_ptr_factory_.GetWeakPtr()));
    if (!cache) {
      return;
    }
  }

  client_id_to_caches_.emplace(client_id, cache);
  gpu_service_remote_->SetChannelDiskCacheHandle(client_id, handle);
}
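
// Note: a single client id may be associated with more than one disk cache;
// RemoveChannelDiskCacheHandles() below walks the full equal_range() for the
// id and releases each handle, while DidDestroyChannel() simply drops any
// remaining entries for the id.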

void GpuHostImpl::RemoveChannelDiskCacheHandles(int client_id) {
  // Release the handle, then release the cache.
  auto [start, end] = client_id_to_caches_.equal_range(client_id);
  for (auto it = start; it != end; ++it) {
    delegate_->GetGpuDiskCacheFactory()->ReleaseCacheHandle(it->second.get());
  }
  client_id_to_caches_.erase(client_id);
}

void GpuHostImpl::CloseChannel(int client_id) {
  gpu_service_remote_->CloseChannel(client_id);
  channel_requests_.erase(client_id);
}

#if BUILDFLAG(USE_VIZ_DEBUGGER)
void GpuHostImpl::FilterVisualDebugStream(base::Value json) {
  viz_main_->FilterDebugStream(std::move(json));
}

void GpuHostImpl::StartVisualDebugStream(
    base::RepeatingCallback<void(base::Value)> callback) {
  viz_debug_output_callback_ = std::move(callback);
  viz_main_->StartDebugStream(viz_debug_output_.BindNewPipeAndPassRemote());
}

void GpuHostImpl::StopVisualDebugStream() {
  viz_main_->StopDebugStream();
  viz_debug_output_.reset();
}
#endif

void GpuHostImpl::SendOutstandingReplies() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  for (auto& handler : connection_error_handlers_)
    std::move(handler).Run();
  connection_error_handlers_.clear();

  // Send empty channel handles for all EstablishChannel requests.
  for (auto& entry : channel_requests_) {
    std::move(entry.second)
        .Run(mojo::ScopedMessagePipeHandle(), gpu::GPUInfo(),
             gpu::GpuFeatureInfo(), EstablishChannelStatus::kGpuHostInvalid);
  }
  channel_requests_.clear();
}

void GpuHostImpl::BindInterface(const std::string& interface_name,
                                mojo::ScopedMessagePipeHandle interface_pipe) {
  delegate_->BindInterface(interface_name, std::move(interface_pipe));
}

mojom::GpuService* GpuHostImpl::gpu_service() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(gpu_service_remote_.is_bound());
  return gpu_service_remote_.get();
}

#if BUILDFLAG(IS_WIN)
mojom::InfoCollectionGpuService* GpuHostImpl::info_collection_gpu_service() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(info_collection_gpu_service_remote_.is_bound());
  return info_collection_gpu_service_remote_.get();
}
#endif

#if defined(USE_OZONE)
void GpuHostImpl::InitOzone() {
  // Ozone needs to send the primary DRM device to the GPU service as early as
  // possible to ensure the latter always has a valid device.
  // https://crbug.com/608839
  //
  // Ozone/Wayland requires Mojo communication to be established before it is
  // functional with a separate GPU process. Thus, check via the
  // PlatformProperties whether there is such a requirement.
  auto interface_binder = base::BindRepeating(&GpuHostImpl::BindInterface,
                                              weak_ptr_factory_.GetWeakPtr());
  auto terminate_callback = base::BindOnce(&GpuHostImpl::TerminateGpuProcess,
                                           weak_ptr_factory_.GetWeakPtr());

  ui::OzonePlatform::GetInstance()
      ->GetGpuPlatformSupportHost()
      ->OnGpuServiceLaunched(params_.restart_id, interface_binder,
                             std::move(terminate_callback));
}

void GpuHostImpl::TerminateGpuProcess(const std::string& message) {
  delegate_->TerminateGpuProcess(message);
}
#endif  // defined(USE_OZONE)

std::string GpuHostImpl::GetShaderPrefixKey() {
  if (shader_prefix_key_.empty()) {
    const gpu::GPUInfo& info = delegate_->GetGPUInfo();
    const gpu::GPUInfo::GPUDevice& active_gpu = info.active_gpu();

    shader_prefix_key_ = params_.product + "-" + info.gl_vendor + "-" +
                         info.gl_renderer + "-" + active_gpu.driver_version +
                         "-" + active_gpu.driver_vendor;

#if BUILDFLAG(IS_ANDROID)
    std::string build_fp =
        base::android::BuildInfo::GetInstance()->android_build_fp();
    shader_prefix_key_ += "-" + build_fp;
#endif
  }
  return shader_prefix_key_;
}
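
// For reference (illustrative placeholders, not an exhaustive spec): the
// prefix built above has the form
//   "<product>-<gl_vendor>-<gl_renderer>-<driver_version>-<driver_vendor>"
// with "-<android_build_fp>" appended on Android.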

void GpuHostImpl::LoadedBlob(const gpu::GpuDiskCacheHandle& handle,
                             const std::string& key,
                             const std::string& data) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  switch (gpu::GetHandleType(handle)) {
    case gpu::GpuDiskCacheType::kGlShaders: {
      std::string prefix = GetShaderPrefixKey();
      bool prefix_ok = !key.compare(0, prefix.length(), prefix);
      UMA_HISTOGRAM_BOOLEAN("GPU.ShaderLoadPrefixOK", prefix_ok);
      if (prefix_ok) {
        // Remove the prefix from the key before load.
        std::string key_no_prefix = key.substr(prefix.length() + 1);
        gpu_service_remote_->LoadedBlob(handle, key_no_prefix, data);
      }
      break;
    }
    case gpu::GpuDiskCacheType::kDawnWebGPU: {
      gpu_service_remote_->LoadedBlob(handle, key, data);
      break;
    }
  }
}
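
// Note: StoreBlobToDisk() below writes GL shader entries under
// "<shader prefix>:<key>", so the prefix.length() + 1 above strips both the
// prefix and the ':' separator before the key is forwarded to the GPU
// service.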

void GpuHostImpl::OnDiskCacheHandleDestoyed(
    const gpu::GpuDiskCacheHandle& handle) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  gpu_service_remote_->OnDiskCacheHandleDestoyed(handle);
}

void GpuHostImpl::OnChannelEstablished(
    int client_id,
    bool sync,
    mojo::ScopedMessagePipeHandle channel_handle,
    const gpu::GPUInfo& gpu_info,
    const gpu::GpuFeatureInfo& gpu_feature_info) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  TRACE_EVENT0("gpu", "GpuHostImpl::OnChannelEstablished");

  auto it = channel_requests_.find(client_id);
  if (it == channel_requests_.end())
    return;

  auto callback = std::move(it->second);
  channel_requests_.erase(it);

  // Currently if any of the GPU features are blocklisted, we don't establish
  // a GPU channel.
  if (channel_handle.is_valid() && !delegate_->GpuAccessAllowed()) {
    gpu_service_remote_->CloseChannel(client_id);
    std::move(callback).Run(mojo::ScopedMessagePipeHandle(), gpu::GPUInfo(),
                            gpu::GpuFeatureInfo(),
                            EstablishChannelStatus::kGpuAccessDenied);
    RecordLogMessage(logging::LOG_WARNING, "WARNING",
                     "Hardware acceleration is unavailable.");
    return;
  }

  // TODO(jam): always use GPUInfo & GpuFeatureInfo from the service once we
  // know there's no issue with the ProcessHostOnUI which is the only mode
  // that currently uses it. This is because in that mode the sync mojo call
  // in the caller means we won't get the async DidInitialize() call before
  // this point, so the delegate_ methods won't have the GPU info structs yet.
  if (sync) {
    std::move(callback).Run(std::move(channel_handle), gpu_info,
                            gpu_feature_info, EstablishChannelStatus::kSuccess);
  } else {
    std::move(callback).Run(std::move(channel_handle), delegate_->GetGPUInfo(),
                            delegate_->GetGpuFeatureInfo(),
                            EstablishChannelStatus::kSuccess);
  }
}

void GpuHostImpl::DidInitialize(
    const gpu::GPUInfo& gpu_info,
    const gpu::GpuFeatureInfo& gpu_feature_info,
    const absl::optional<gpu::GPUInfo>& gpu_info_for_hardware_gpu,
    const absl::optional<gpu::GpuFeatureInfo>&
        gpu_feature_info_for_hardware_gpu,
    const gfx::GpuExtraInfo& gpu_extra_info) {
  UMA_HISTOGRAM_BOOLEAN("GPU.GPUProcessInitialized", true);

  delegate_->DidInitialize(gpu_info, gpu_feature_info,
                           gpu_info_for_hardware_gpu,
                           gpu_feature_info_for_hardware_gpu, gpu_extra_info);

  if (!params_.disable_gpu_shader_disk_cache) {
    SetChannelDiskCacheHandle(gpu::kDisplayCompositorClientId,
                              gpu::kDisplayCompositorGpuDiskCacheHandle);
    SetChannelDiskCacheHandle(gpu::kGrShaderCacheClientId,
                              gpu::kGrShaderGpuDiskCacheHandle);
  }
}

void GpuHostImpl::DidFailInitialize() {
  UMA_HISTOGRAM_BOOLEAN("GPU.GPUProcessInitialized", false);
  delegate_->DidFailInitialize();
}

void GpuHostImpl::DidCreateContextSuccessfully() {
  delegate_->DidCreateContextSuccessfully();
}

void GpuHostImpl::DidCreateOffscreenContext(const GURL& url) {
  urls_with_live_offscreen_contexts_.insert(url);
}

void GpuHostImpl::DidDestroyOffscreenContext(const GURL& url) {
  // We only want to remove *one* of the entries in the multiset for this
  // particular URL, so can't use the erase method taking a key.
  auto candidate = urls_with_live_offscreen_contexts_.find(url);
  if (candidate != urls_with_live_offscreen_contexts_.end())
    urls_with_live_offscreen_contexts_.erase(candidate);
}

void GpuHostImpl::DidDestroyChannel(int32_t client_id) {
  TRACE_EVENT0("gpu", "GpuHostImpl::DidDestroyChannel");
  client_id_to_caches_.erase(client_id);
}

void GpuHostImpl::DidDestroyAllChannels() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (!channel_requests_.empty())
    return;
  constexpr base::TimeDelta kShutDownTimeout = base::Seconds(10);
  shutdown_timeout_.Start(FROM_HERE, kShutDownTimeout,
                          base::BindOnce(&GpuHostImpl::MaybeShutdownGpuProcess,
                                         base::Unretained(this)));
}
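
// Note: the delayed MaybeShutdownGpuProcess() scheduled above is cancelled if
// a new channel request arrives first; see shutdown_timeout_.Stop() in
// EstablishGpuChannel().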

void GpuHostImpl::MaybeShutdownGpuProcess() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(channel_requests_.empty());
  delegate_->MaybeShutdownGpuProcess();
}

void GpuHostImpl::DidLoseContext(bool offscreen,
                                 gpu::error::ContextLostReason reason,
                                 const GURL& active_url) {
  // TODO(kbr): would be nice to see the "offscreen" flag too.
  TRACE_EVENT2("gpu", "GpuHostImpl::DidLoseContext", "reason", reason, "url",
               active_url.possibly_invalid_spec());

  if (active_url.is_empty()) {
    return;
  }

  gpu::DomainGuilt guilt = gpu::DomainGuilt::kUnknown;
  switch (reason) {
    case gpu::error::kGuilty:
      guilt = gpu::DomainGuilt::kKnown;
      break;
    // Treat most other error codes as though they had unknown provenance.
    // In practice this doesn't affect the user experience. A lost context
    // of either known or unknown guilt still causes user-level 3D APIs
    // (e.g. WebGL) to be blocked on that domain until the user manually
    // reenables them.
    case gpu::error::kUnknown:
    case gpu::error::kOutOfMemory:
    case gpu::error::kMakeCurrentFailed:
    case gpu::error::kGpuChannelLost:
    case gpu::error::kInvalidGpuMessage:
      break;
    case gpu::error::kInnocent:
      return;
  }

  std::set<GURL> urls{active_url};
  delegate_->BlockDomainsFrom3DAPIs(urls, guilt);
}

void GpuHostImpl::DisableGpuCompositing() {
  delegate_->DisableGpuCompositing();
}

void GpuHostImpl::DidUpdateGPUInfo(const gpu::GPUInfo& gpu_info) {
  delegate_->DidUpdateGPUInfo(gpu_info);
}

#if BUILDFLAG(IS_WIN)
void GpuHostImpl::DidUpdateOverlayInfo(const gpu::OverlayInfo& overlay_info) {
  delegate_->DidUpdateOverlayInfo(overlay_info);
}

void GpuHostImpl::DidUpdateDXGIInfo(gfx::mojom::DXGIInfoPtr dxgi_info) {
  delegate_->DidUpdateDXGIInfo(std::move(dxgi_info));
}

void GpuHostImpl::SetChildSurface(gpu::SurfaceHandle parent,
                                  gpu::SurfaceHandle child) {
  if (pid_ != base::kNullProcessId) {
    gfx::RenderingWindowManager::GetInstance()->RegisterChild(
        parent, child, /*expected_child_process_id=*/pid_);
  }
}
#endif  // BUILDFLAG(IS_WIN)

void GpuHostImpl::StoreBlobToDisk(const gpu::GpuDiskCacheHandle& handle,
                                  const std::string& key,
                                  const std::string& blob) {
  TRACE_EVENT0("gpu", "GpuHostImpl::StoreBlobToDisk");
  scoped_refptr<gpu::GpuDiskCache> cache =
      delegate_->GetGpuDiskCacheFactory()->Get(handle);
  if (!cache) {
    return;
  }
  switch (GetHandleType(handle)) {
    case gpu::GpuDiskCacheType::kGlShaders: {
      std::string prefix = GetShaderPrefixKey();
      cache->Cache(base::StrCat({prefix, ":", key}), blob);
      break;
    }
    case gpu::GpuDiskCacheType::kDawnWebGPU: {
      cache->Cache(key, blob);
      break;
    }
  }
}

void GpuHostImpl::RecordLogMessage(int32_t severity,
                                   const std::string& header,
                                   const std::string& message) {
  delegate_->RecordLogMessage(severity, header, message);
}

#if BUILDFLAG(USE_VIZ_DEBUGGER)
void GpuHostImpl::LogFrame(base::Value frame_data) {
  if (!viz_debug_output_callback_.is_null())
    viz_debug_output_callback_.Run(std::move(frame_data));
}
#endif

}  // namespace viz