gpu_channel_manager.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/ipc/service/gpu_channel_manager.h"

#include <algorithm>
#include <memory>
#include <utility>

#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include <dxgi1_3.h>
#endif

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/crash_logging.h"
#include "base/debug/dump_without_crashing.h"
#include "base/location.h"
#include "base/metrics/histogram_macros.h"
#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
#include "base/task/bind_post_task.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/traced_value.h"
#include "gpu/command_buffer/common/context_creation_attribs.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/gl_utils.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
#include "gpu/command_buffer/service/mailbox_manager_factory.h"
#include "gpu/command_buffer/service/memory_program_cache.h"
#include "gpu/command_buffer/service/passthrough_program_cache.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/config/gpu_crash_keys.h"
#include "gpu/config/gpu_finch_features.h"
#include "gpu/ipc/common/gpu_client_ids.h"
#include "gpu/ipc/common/memory_stats.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
#include "gpu/ipc/service/gpu_memory_ablation_experiment.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "third_party/skia/include/core/SkGraphics.h"
#if BUILDFLAG(IS_WIN)
#include "ui/gl/gl_angle_util_win.h"
#endif
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_enums.h"
#include "ui/gl/gl_share_group.h"
#include "ui/gl/gl_surface_egl.h"
#include "ui/gl/gl_version_info.h"
#include "ui/gl/init/gl_factory.h"

namespace gpu {
namespace {

#if BUILDFLAG(IS_ANDROID)
// Amount of time we expect the GPU to stay powered up without being used.
const int kMaxGpuIdleTimeMs = 40;
// Maximum amount of time we keep pinging the GPU waiting for the client to
// draw.
const int kMaxKeepAliveTimeMs = 200;
#endif

#if BUILDFLAG(IS_WIN)
void TrimD3DResources() {
  // Graphics drivers periodically allocate internal memory buffers in
  // order to speed up subsequent rendering requests. These memory allocations
  // in general lead to increased memory usage by the overall system.
  // Calling Trim discards internal memory buffers allocated for the app,
  // reducing its memory footprint.
  // Calling the Trim method does not change the rendering state of the
  // graphics device and has no effect on rendering operations.
  // There is a brief performance hit when internal buffers are reallocated
  // during the first rendering operations after the Trim call, therefore
  // apps should only call Trim when going idle for a period of time or during
  // low memory conditions.
  Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
      gl::QueryD3D11DeviceObjectFromANGLE();
  if (d3d11_device) {
    Microsoft::WRL::ComPtr<IDXGIDevice3> dxgi_device;
    if (SUCCEEDED(d3d11_device.As(&dxgi_device))) {
      dxgi_device->Trim();
    }
  }
}
#endif
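
// Called by the GL implementation (via the KHR_debug machinery) whenever a
// debug message is generated. |user_param| is the pointer handed to
// gles2::InitializeGLDebugLogging() when this callback is installed in
// GetSharedContextState() below; it carries a countdown of how many GL-error
// crash reports may still be uploaded for this GPU process.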
void APIENTRY CrashReportOnGLErrorDebugCallback(GLenum source,
                                                GLenum type,
                                                GLuint id,
                                                GLenum severity,
                                                GLsizei length,
                                                const GLchar* message,
                                                const GLvoid* user_param) {
  if (type == GL_DEBUG_TYPE_ERROR && source == GL_DEBUG_SOURCE_API &&
      user_param) {
    // Note: log_message cannot contain any user data. The error strings
    // generated from ANGLE are all static strings and do not contain user
    // information such as shader source code. Be careful if updating the
    // contents of this string.
    std::string log_message = gl::GLEnums::GetStringEnum(id);
    if (message && length > 0) {
      log_message += ": " + std::string(message, length);
    }
    LOG(ERROR) << log_message;
    crash_keys::gpu_gl_error_message.Set(log_message);

    int* remaining_reports =
        const_cast<int*>(static_cast<const int*>(user_param));
    if (*remaining_reports > 0) {
      base::debug::DumpWithoutCrashing();
      (*remaining_reports)--;
    }
  }
}

void FormatAllocationSourcesForTracing(
    base::trace_event::TracedValue* dict,
    base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>&
        allocation_sources) {
  dict->SetInteger("UNKNOWN",
                   allocation_sources[GpuPeakMemoryAllocationSource::UNKNOWN]);
  dict->SetInteger(
      "COMMAND_BUFFER",
      allocation_sources[GpuPeakMemoryAllocationSource::COMMAND_BUFFER]);
  dict->SetInteger(
      "SHARED_CONTEXT_STATE",
      allocation_sources[GpuPeakMemoryAllocationSource::SHARED_CONTEXT_STATE]);
  dict->SetInteger(
      "SHARED_IMAGE_STUB",
      allocation_sources[GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB]);
  dict->SetInteger("SKIA",
                   allocation_sources[GpuPeakMemoryAllocationSource::SKIA]);
}

void SetCrashKeyTimeDelta(base::debug::CrashKeyString* key,
                          base::TimeDelta time_delta) {
  auto str = base::StringPrintf(
      "%d hours, %d min, %lld sec, %lld ms", time_delta.InHours(),
      time_delta.InMinutes() % 60, time_delta.InSeconds() % 60ll,
      time_delta.InMilliseconds() % 1000ll);
  base::debug::SetCrashKeyString(key, str);
}

}  // namespace

GpuChannelManager::GpuPeakMemoryMonitor::GpuPeakMemoryMonitor(
    GpuChannelManager* channel_manager,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner)
    : ablation_experiment_(
          std::make_unique<GpuMemoryAblationExperiment>(channel_manager,
                                                        task_runner)),
      weak_factory_(this) {}

GpuChannelManager::GpuPeakMemoryMonitor::~GpuPeakMemoryMonitor() = default;

base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
GpuChannelManager::GpuPeakMemoryMonitor::GetPeakMemoryUsage(
    uint32_t sequence_num,
    uint64_t* out_peak_memory) {
  auto sequence = sequence_trackers_.find(sequence_num);
  base::flat_map<GpuPeakMemoryAllocationSource, uint64_t> allocation_per_source;
  *out_peak_memory = 0u;
  if (sequence != sequence_trackers_.end()) {
    *out_peak_memory = sequence->second.total_memory_;
    allocation_per_source = sequence->second.peak_memory_per_source_;

    uint64_t ablation_memory =
        ablation_experiment_->GetPeakMemory(sequence_num);
    *out_peak_memory += ablation_memory;
    allocation_per_source[GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB] +=
        ablation_memory;
  }
  return allocation_per_source;
}
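
// Illustrative lifecycle of a peak-memory query (a sketch; the real call
// sites live outside this file): the embedder starts a sequence, lets the
// workload run, then reads and ends the sequence in a single call:
//
//   manager->StartPeakMemoryMonitor(sequence_num);
//   /* ... GPU work ... */
//   uint64_t peak_bytes = 0;
//   auto per_source = manager->GetPeakMemoryUsage(sequence_num, &peak_bytes);
//
// StartGpuMemoryTracking() seeds a SequenceTracker with the current totals,
// OnMemoryAllocatedChange() raises each tracker's peak as allocations grow,
// and GetPeakMemoryUsage()/StopGpuMemoryTracking() read and drop the tracker.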
void GpuChannelManager::GpuPeakMemoryMonitor::StartGpuMemoryTracking(
    uint32_t sequence_num) {
  sequence_trackers_.emplace(
      sequence_num,
      SequenceTracker(current_memory_, current_memory_per_source_));
  ablation_experiment_->StartSequence(sequence_num);
  TRACE_EVENT_ASYNC_BEGIN2("gpu", "PeakMemoryTracking", sequence_num, "start",
                           current_memory_, "start_sources",
                           StartTrackingTracedValue());
}

void GpuChannelManager::GpuPeakMemoryMonitor::StopGpuMemoryTracking(
    uint32_t sequence_num) {
  auto sequence = sequence_trackers_.find(sequence_num);
  if (sequence != sequence_trackers_.end()) {
    TRACE_EVENT_ASYNC_END2("gpu", "PeakMemoryTracking", sequence_num, "peak",
                           sequence->second.total_memory_, "end_sources",
                           StopTrackingTracedValue(sequence->second));
    sequence_trackers_.erase(sequence);
    ablation_experiment_->StopSequence(sequence_num);
  }
}

base::WeakPtr<MemoryTracker::Observer>
GpuChannelManager::GpuPeakMemoryMonitor::GetWeakPtr() {
  return weak_factory_.GetWeakPtr();
}

void GpuChannelManager::GpuPeakMemoryMonitor::InvalidateWeakPtrs() {
  weak_factory_.InvalidateWeakPtrs();
}

GpuChannelManager::GpuPeakMemoryMonitor::SequenceTracker::SequenceTracker(
    uint64_t current_memory,
    base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
        current_memory_per_source)
    : initial_memory_(current_memory),
      total_memory_(current_memory),
      initial_memory_per_source_(current_memory_per_source),
      peak_memory_per_source_(std::move(current_memory_per_source)) {}

GpuChannelManager::GpuPeakMemoryMonitor::SequenceTracker::SequenceTracker(
    const SequenceTracker& other) = default;

GpuChannelManager::GpuPeakMemoryMonitor::SequenceTracker::~SequenceTracker() =
    default;

std::unique_ptr<base::trace_event::TracedValue>
GpuChannelManager::GpuPeakMemoryMonitor::StartTrackingTracedValue() {
  auto dict = std::make_unique<base::trace_event::TracedValue>();
  FormatAllocationSourcesForTracing(dict.get(), current_memory_per_source_);
  return dict;
}

std::unique_ptr<base::trace_event::TracedValue>
GpuChannelManager::GpuPeakMemoryMonitor::StopTrackingTracedValue(
    SequenceTracker& sequence) {
  auto dict = std::make_unique<base::trace_event::TracedValue>();
  dict->BeginDictionary("source_totals");
  FormatAllocationSourcesForTracing(dict.get(),
                                    sequence.peak_memory_per_source_);
  dict->EndDictionary();

  dict->BeginDictionary("difference");
  int total_diff = sequence.total_memory_ - sequence.initial_memory_;
  dict->SetInteger("TOTAL", total_diff);
  dict->EndDictionary();

  dict->BeginDictionary("source_difference");
  for (auto it : sequence.peak_memory_per_source_) {
    int diff = (it.second - sequence.initial_memory_per_source_[it.first]);
    switch (it.first) {
      case GpuPeakMemoryAllocationSource::UNKNOWN:
        dict->SetInteger("UNKNOWN", diff);
        break;
      case GpuPeakMemoryAllocationSource::COMMAND_BUFFER:
        dict->SetInteger("COMMAND_BUFFER", diff);
        break;
      case GpuPeakMemoryAllocationSource::SHARED_CONTEXT_STATE:
        dict->SetInteger("SHARED_CONTEXT_STATE", diff);
        break;
      case GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB:
        dict->SetInteger("SHARED_IMAGE_STUB", diff);
        break;
      case GpuPeakMemoryAllocationSource::SKIA:
        dict->SetInteger("SKIA", diff);
        break;
    }
  }
  dict->EndDictionary();

  return dict;
}
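
// Note on the arithmetic below: |new_size - old_size| is computed in
// uint64_t, so when an allocation shrinks the subtraction wraps around;
// adding the wrapped delta to |current_memory_| wraps back and still yields
// the correct, smaller total. Peaks only need re-examining when memory grew,
// hence the |old_size < new_size| check.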
void GpuChannelManager::GpuPeakMemoryMonitor::OnMemoryAllocatedChange(
    CommandBufferId id,
    uint64_t old_size,
    uint64_t new_size,
    GpuPeakMemoryAllocationSource source) {
  uint64_t diff = new_size - old_size;
  current_memory_ += diff;
  current_memory_per_source_[source] += diff;

  ablation_experiment_->OnMemoryAllocated(old_size, new_size);

  if (old_size < new_size) {
    // When memory has increased, iterate over the sequences to update their
    // peak.
    // TODO(jonross): This should be fine if we typically have 1-2 sequences.
    // However, if that grows, we may end up iterating many times as memory
    // approaches its peak. If that is the case we should track a
    // |peak_since_last_sequence_update_| on the memory changes, and then only
    // update the sequences when a new one is added or the peak is requested.
    for (auto& seq : sequence_trackers_) {
      if (current_memory_ > seq.second.total_memory_) {
        seq.second.total_memory_ = current_memory_;
        for (auto& sequence : sequence_trackers_) {
          TRACE_EVENT_ASYNC_STEP_INTO1("gpu", "PeakMemoryTracking",
                                       sequence.first, "Peak", "peak",
                                       current_memory_);
        }
        for (auto& memory_per_source : current_memory_per_source_) {
          seq.second.peak_memory_per_source_[memory_per_source.first] =
              memory_per_source.second;
        }
      }
    }
  }
}

GpuChannelManager::GpuChannelManager(
    const GpuPreferences& gpu_preferences,
    GpuChannelManagerDelegate* delegate,
    GpuWatchdogThread* watchdog,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
    scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
    Scheduler* scheduler,
    SyncPointManager* sync_point_manager,
    SharedImageManager* shared_image_manager,
    GpuMemoryBufferFactory* gpu_memory_buffer_factory,
    const GpuFeatureInfo& gpu_feature_info,
    GpuProcessActivityFlags activity_flags,
    scoped_refptr<gl::GLSurface> default_offscreen_surface,
    ImageDecodeAcceleratorWorker* image_decode_accelerator_worker,
    viz::VulkanContextProvider* vulkan_context_provider,
    viz::MetalContextProvider* metal_context_provider,
    viz::DawnContextProvider* dawn_context_provider)
    : task_runner_(task_runner),
      io_task_runner_(io_task_runner),
      gpu_preferences_(gpu_preferences),
      gpu_driver_bug_workarounds_(
          gpu_feature_info.enabled_gpu_driver_bug_workarounds),
      delegate_(delegate),
      watchdog_(watchdog),
      share_group_(new gl::GLShareGroup()),
      mailbox_manager_(gles2::CreateMailboxManager(gpu_preferences)),
      scheduler_(scheduler),
      sync_point_manager_(sync_point_manager),
      shared_image_manager_(shared_image_manager),
      shader_translator_cache_(gpu_preferences_),
      default_offscreen_surface_(std::move(default_offscreen_surface)),
      gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
      gpu_feature_info_(gpu_feature_info),
      discardable_manager_(gpu_preferences_),
      passthrough_discardable_manager_(gpu_preferences_),
      image_decode_accelerator_worker_(image_decode_accelerator_worker),
      activity_flags_(std::move(activity_flags)),
      memory_pressure_listener_(
          FROM_HERE,
          base::BindRepeating(&GpuChannelManager::HandleMemoryPressure,
                              base::Unretained(this))),
      vulkan_context_provider_(vulkan_context_provider),
      metal_context_provider_(metal_context_provider),
      dawn_context_provider_(dawn_context_provider),
      peak_memory_monitor_(this, task_runner) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(task_runner->BelongsToCurrentThread());
  DCHECK(io_task_runner);
  DCHECK(scheduler);

  const bool enable_gr_shader_cache =
      (gpu_feature_info_.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] ==
       gpu::kGpuFeatureStatusEnabled);
  const bool disable_disk_cache =
      gpu_preferences_.disable_gpu_shader_disk_cache;
  if (enable_gr_shader_cache && !disable_disk_cache) {
    gr_shader_cache_.emplace(gpu_preferences.gpu_program_cache_size, this);
    gr_shader_cache_->CacheClientIdOnDisk(gpu::kDisplayCompositorClientId);
  }
}

GpuChannelManager::~GpuChannelManager() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // Clear |gpu_channels_| first to prevent reentrancy problems from the
  // GpuChannel destructor.
  auto gpu_channels = std::move(gpu_channels_);
  gpu_channels_.clear();
  gpu_channels.clear();

  if (default_offscreen_surface_.get()) {
    default_offscreen_surface_->Destroy();
    default_offscreen_surface_ = nullptr;
  }

  // Invalidate here as the |shared_context_state_| attempts to call back to
  // |this| in the middle of the deletion.
  peak_memory_monitor_.InvalidateWeakPtrs();

  // Try to make the context current so that GPU resources can be destroyed
  // correctly.
  if (shared_context_state_)
    shared_context_state_->MakeCurrent(nullptr);
}

gles2::Outputter* GpuChannelManager::outputter() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (!outputter_) {
    outputter_ =
        std::make_unique<gles2::TraceOutputter>("GpuChannelManager Trace");
  }
  return outputter_.get();
}

gles2::ProgramCache* GpuChannelManager::program_cache() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (!program_cache_.get()) {
    const GpuDriverBugWorkarounds& workarounds = gpu_driver_bug_workarounds_;
    bool disable_disk_cache =
        gpu_preferences_.disable_gpu_shader_disk_cache ||
        workarounds.disable_program_disk_cache;

    // Use the EGL blob cache extension for the passthrough decoder.
    if (gpu_preferences_.use_passthrough_cmd_decoder &&
        gles2::PassthroughCommandDecoderSupported()) {
      program_cache_ = std::make_unique<gles2::PassthroughProgramCache>(
          gpu_preferences_.gpu_program_cache_size, disable_disk_cache);
    } else {
      program_cache_ = std::make_unique<gles2::MemoryProgramCache>(
          gpu_preferences_.gpu_program_cache_size, disable_disk_cache,
          workarounds.disable_program_caching_for_transform_feedback,
          &activity_flags_);
    }
  }
  return program_cache_.get();
}

void GpuChannelManager::RemoveChannel(int client_id) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  auto it = gpu_channels_.find(client_id);
  if (it == gpu_channels_.end())
    return;

  delegate_->DidDestroyChannel(client_id);

  // Erase the |gpu_channels_| entry before destroying the GpuChannel object to
  // avoid reentrancy problems from the GpuChannel destructor.
  std::unique_ptr<GpuChannel> channel = std::move(it->second);
  gpu_channels_.erase(it);
  channel.reset();

  if (gpu_channels_.empty()) {
    delegate_->DidDestroyAllChannels();
  }
}

GpuChannel* GpuChannelManager::LookupChannel(int32_t client_id) const {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  const auto& it = gpu_channels_.find(client_id);
  return it != gpu_channels_.end() ? it->second.get() : nullptr;
}

GpuChannel* GpuChannelManager::EstablishChannel(
    const base::UnguessableToken& channel_token,
    int client_id,
    uint64_t client_tracing_id,
    bool is_gpu_host) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  std::unique_ptr<GpuChannel> gpu_channel = GpuChannel::Create(
      this, channel_token, scheduler_, sync_point_manager_, share_group_,
      task_runner_, io_task_runner_, client_id, client_tracing_id, is_gpu_host,
      image_decode_accelerator_worker_);
  if (!gpu_channel)
    return nullptr;

  GpuChannel* gpu_channel_ptr = gpu_channel.get();
  gpu_channels_[client_id] = std::move(gpu_channel);
  return gpu_channel_ptr;
}

void GpuChannelManager::SetChannelClientPid(int client_id,
                                            base::ProcessId client_pid) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  GpuChannel* gpu_channel = LookupChannel(client_id);
  if (gpu_channel) {
    // TODO(rockot): It's possible to receive different PIDs for the same
    // GpuChannel because some clients may reuse a client ID. For example, if a
    // Content renderer crashes and restarts, the new process will use the same
    // GPU client ID that the crashed process used. In such cases, this
    // SetChannelClientPid (which comes from the GPU host, not the client
    // process) may arrive late with the crashed process's PID, followed
    // shortly thereafter by the current PID of the client.
    //
    // For a short window of time this means a GpuChannel may have a stale PID
    // value. It's not a serious issue, since the PID is only informational and
    // not required for security or application correctness, but we should
    // still address it. One option is to introduce a separate host-controlled
    // interface that is paired with the GpuChannel during Establish, which the
    // host can then use to asynchronously push down a PID for the specific
    // channel instance.
    gpu_channel->set_client_pid(client_pid);
  }
}

void GpuChannelManager::SetChannelDiskCacheHandle(
    int client_id,
    const gpu::GpuDiskCacheHandle& handle) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  GpuChannel* gpu_channel = LookupChannel(client_id);
  if (gpu_channel) {
    gpu_channel->RegisterCacheHandle(handle);
  }

  // Record the client id for the shader-specific cache.
  if (gr_shader_cache_ &&
      gpu::GetHandleType(handle) == gpu::GpuDiskCacheType::kGlShaders) {
    gr_shader_cache_->CacheClientIdOnDisk(client_id);
  }
}

void GpuChannelManager::OnDiskCacheHandleDestoyed(
    const gpu::GpuDiskCacheHandle& handle) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  switch (gpu::GetHandleType(handle)) {
    case gpu::GpuDiskCacheType::kGlShaders: {
      // No handling is currently necessary when the disk cache is destroyed
      // for the shader cache: it consists of just two large caches that are
      // long-lived and shared across all channels (i.e. unfortunately there
      // is currently no access partitioning for it w.r.t. different handles).
      break;
    }
    case gpu::GpuDiskCacheType::kDawnWebGPU: {
      // TODO(dawn:549) Implement cache destruction for Dawn.
      break;
    }
  }
}
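
// GPU memory buffer destruction must not outrun work that still references
// the buffer: DestroyGpuMemoryBuffer() defers the actual destruction through
// SyncPointManager::WaitOutOfOrder() until |sync_token| is released, and only
// destroys immediately when the token is empty or invalid.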
void GpuChannelManager::InternalDestroyGpuMemoryBuffer(
    gfx::GpuMemoryBufferId id,
    int client_id) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  gpu_memory_buffer_factory_->DestroyGpuMemoryBuffer(id, client_id);
}

void GpuChannelManager::DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
                                               int client_id,
                                               const SyncToken& sync_token) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  if (!sync_point_manager_->WaitOutOfOrder(
          sync_token,
          base::BindOnce(&GpuChannelManager::InternalDestroyGpuMemoryBuffer,
                         base::Unretained(this), id, client_id))) {
    // No sync token or invalid sync token, destroy immediately.
    InternalDestroyGpuMemoryBuffer(id, client_id);
  }
}

void GpuChannelManager::PopulateCache(const gpu::GpuDiskCacheHandle& handle,
                                      const std::string& key,
                                      const std::string& data) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  switch (gpu::GetHandleType(handle)) {
    case gpu::GpuDiskCacheType::kGlShaders: {
      auto gl_shader_handle =
          absl::get<gpu::GpuDiskCacheGlShaderHandle>(handle);
      if (gl_shader_handle == kGrShaderGpuDiskCacheHandle) {
        if (gr_shader_cache_)
          gr_shader_cache_->PopulateCache(key, data);
        return;
      }
      if (program_cache())
        program_cache()->LoadProgram(key, data);
      break;
    }
    case gpu::GpuDiskCacheType::kDawnWebGPU: {
      // TODO(dawn:549) Implement populating the cache for Dawn.
      NOTREACHED();
      break;
    }
  }
}
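
// Marks every context lost and *posts* DestroyAllChannels() rather than
// calling it inline, so the channels are destroyed only after the current
// task has unwound.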
void GpuChannelManager::LoseAllContexts() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  discardable_manager_.OnContextLost();
  passthrough_discardable_manager_.OnContextLost();
  share_group_ = base::MakeRefCounted<gl::GLShareGroup>();
  for (auto& kv : gpu_channels_) {
    kv.second->MarkAllContextsLost();
  }
  task_runner_->PostTask(FROM_HERE,
                         base::BindOnce(&GpuChannelManager::DestroyAllChannels,
                                        weak_factory_.GetWeakPtr()));
  if (shared_context_state_) {
    shared_context_state_->MarkContextLost();
    shared_context_state_.reset();
  }
}
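
// The callbacks returned by the two getters below are wrapped with
// base::BindPostTask(): they may be invoked from any thread, and the bound
// method is re-posted to |task_runner_|, so the weak pointer is only
// dereferenced on the GPU main thread.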
SharedContextState::ContextLostCallback
GpuChannelManager::GetContextLostCallback() {
  return base::BindPostTask(task_runner_,
                            base::BindOnce(&GpuChannelManager::OnContextLost,
                                           weak_factory_.GetWeakPtr()));
}

GpuChannelManager::OnMemoryAllocatedChangeCallback
GpuChannelManager::GetOnMemoryAllocatedChangeCallback() {
  return base::BindPostTask(
      task_runner_,
      base::BindOnce(
          [](base::WeakPtr<gpu::GpuChannelManager> gpu_channel_manager,
             gpu::CommandBufferId id, uint64_t old_size, uint64_t new_size,
             gpu::GpuPeakMemoryAllocationSource source) {
            if (gpu_channel_manager) {
              gpu_channel_manager->peak_memory_monitor()
                  ->OnMemoryAllocatedChange(id, old_size, new_size, source);
            }
          },
          weak_factory_.GetWeakPtr()));
}

void GpuChannelManager::DestroyAllChannels() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // Clear |gpu_channels_| first to prevent reentrancy problems from the
  // GpuChannel destructor.
  auto gpu_channels = std::move(gpu_channels_);
  gpu_channels_.clear();
  gpu_channels.clear();
}
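
// Fills |video_memory_usage_stats| with per-client usage keyed by client PID,
// plus an entry for the GPU process itself holding the grand total. That
// entry is flagged |has_duplicates| because it re-counts the per-client
// amounts.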
void GpuChannelManager::GetVideoMemoryUsageStats(
    VideoMemoryUsageStats* video_memory_usage_stats) const {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // For each context group, assign its memory usage to its PID.
  video_memory_usage_stats->process_map.clear();
  uint64_t total_size = 0;
  for (const auto& entry : gpu_channels_) {
    const GpuChannel* channel = entry.second.get();
    if (channel->client_pid() == base::kNullProcessId)
      continue;
    uint64_t size = channel->GetMemoryUsage();
    total_size += size;
    video_memory_usage_stats->process_map[channel->client_pid()].video_memory +=
        size;
  }

  if (shared_context_state_ && !shared_context_state_->context_lost())
    total_size += shared_context_state_->GetMemoryUsage();

  // Assign the total across all processes in the GPU process.
  video_memory_usage_stats->process_map[base::GetCurrentProcId()].video_memory =
      total_size;
  video_memory_usage_stats->process_map[base::GetCurrentProcId()]
      .has_duplicates = true;

  video_memory_usage_stats->bytes_allocated = total_size;
}

void GpuChannelManager::StartPeakMemoryMonitor(uint32_t sequence_num) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  peak_memory_monitor_.StartGpuMemoryTracking(sequence_num);
}

base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
GpuChannelManager::GetPeakMemoryUsage(uint32_t sequence_num,
                                      uint64_t* out_peak_memory) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  auto allocation_per_source =
      peak_memory_monitor_.GetPeakMemoryUsage(sequence_num, out_peak_memory);
  peak_memory_monitor_.StopGpuMemoryTracking(sequence_num);
  return allocation_per_source;
}

#if BUILDFLAG(IS_ANDROID)
void GpuChannelManager::DidAccessGpu() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  last_gpu_access_time_ = base::TimeTicks::Now();
}

void GpuChannelManager::WakeUpGpu() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  begin_wake_up_time_ = base::TimeTicks::Now();
  ScheduleWakeUpGpu();
}
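
// The wake-up loop below keeps the GPU powered while a client is expected to
// draw: every kMaxGpuIdleTimeMs it issues a small piece of GL work (the
// glFinish() in DoWakeUpGpu()) and reschedules itself, stopping once the GPU
// has been accessed recently enough or kMaxKeepAliveTimeMs has elapsed since
// WakeUpGpu().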
void GpuChannelManager::ScheduleWakeUpGpu() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  base::TimeTicks now = base::TimeTicks::Now();
  TRACE_EVENT2("gpu", "GpuChannelManager::ScheduleWakeUp", "idle_time",
               (now - last_gpu_access_time_).InMilliseconds(),
               "keep_awake_time", (now - begin_wake_up_time_).InMilliseconds());
  if (now - last_gpu_access_time_ < base::Milliseconds(kMaxGpuIdleTimeMs))
    return;
  if (now - begin_wake_up_time_ > base::Milliseconds(kMaxKeepAliveTimeMs))
    return;

  DoWakeUpGpu();

  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE,
      base::BindOnce(&GpuChannelManager::ScheduleWakeUpGpu,
                     weak_factory_.GetWeakPtr()),
      base::Milliseconds(kMaxGpuIdleTimeMs));
}

void GpuChannelManager::DoWakeUpGpu() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  const CommandBufferStub* stub = nullptr;
  for (const auto& kv : gpu_channels_) {
    const GpuChannel* channel = kv.second.get();
    const CommandBufferStub* stub_candidate = channel->GetOneStub();
    if (stub_candidate) {
      DCHECK(stub_candidate->decoder_context());
      // With Vulkan, Dawn, etc., RasterDecoders don't use GL.
      if (stub_candidate->decoder_context()->GetGLContext()) {
        stub = stub_candidate;
        break;
      }
    }
  }
  if (!stub || !stub->decoder_context()->MakeCurrent())
    return;
  glFinish();
  DidAccessGpu();
}

void GpuChannelManager::OnBackgroundCleanup() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // Delete all the GL contexts when the channel does not use WebGL and Chrome
  // goes to background on low-end devices.
  std::vector<int> channels_to_clear;
  for (auto& kv : gpu_channels_) {
    // TODO(ssid): The WebGL context-loss event notification must be sent
    // before clearing WebGL contexts; see crbug.com/725306.
    if (kv.second->HasActiveWebGLContext())
      continue;
    channels_to_clear.push_back(kv.first);
    kv.second->MarkAllContextsLost();
  }
  for (int channel : channels_to_clear)
    RemoveChannel(channel);

  if (program_cache_)
    program_cache_->Trim(0u);

  if (shared_context_state_) {
    shared_context_state_->MarkContextLost();
    shared_context_state_.reset();
  }

  SkGraphics::PurgeAllCaches();
}
#endif

void GpuChannelManager::OnApplicationBackgrounded() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (shared_context_state_) {
    shared_context_state_->PurgeMemory(
        base::MemoryPressureListener::MemoryPressureLevel::
            MEMORY_PRESSURE_LEVEL_CRITICAL);
  }

  // Release all Skia caches when the application is backgrounded.
  SkGraphics::PurgeAllCaches();
}

void GpuChannelManager::HandleMemoryPressure(
    base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (program_cache_)
    program_cache_->HandleMemoryPressure(memory_pressure_level);

  // These caches require a current context for cleanup.
  if (shared_context_state_ &&
      shared_context_state_->MakeCurrent(nullptr, true /* needs_gl */)) {
    discardable_manager_.HandleMemoryPressure(memory_pressure_level);
    passthrough_discardable_manager_.HandleMemoryPressure(
        memory_pressure_level);
    shared_context_state_->PurgeMemory(memory_pressure_level);
  }
  if (gr_shader_cache_)
    gr_shader_cache_->PurgeMemory(memory_pressure_level);

#if BUILDFLAG(IS_WIN)
  TrimD3DResources();
#endif
}
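
// Returns the process-wide SharedContextState used for raster and display
// compositing, creating it lazily on first use or after a loss. Creation
// picks a share group, decides whether contexts are virtualized, tries a
// GLES3 context with a GLES2 fallback, and then initializes GL and the
// GrContext, reporting failures through |result| as fatal or transient.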
scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
    ContextResult* result) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  if (shared_context_state_ && !shared_context_state_->context_lost()) {
    *result = ContextResult::kSuccess;
    return shared_context_state_;
  }

  scoped_refptr<gl::GLSurface> surface = default_offscreen_surface();
  bool use_virtualized_gl_contexts = false;
#if BUILDFLAG(IS_MAC)
  // Virtualize GpuPreference::kLowPower contexts by default on OS X to prevent
  // performance regressions when enabling FCM.
  // http://crbug.com/180463
  use_virtualized_gl_contexts = true;
#endif
  use_virtualized_gl_contexts |=
      gpu_driver_bug_workarounds_.use_virtualized_gl_contexts;

  bool enable_angle_validation = features::IsANGLEValidationEnabled();
#if DCHECK_IS_ON()
  // Force validation on for all debug builds and testing.
  enable_angle_validation = true;
#endif

  const bool use_passthrough_decoder =
      gles2::PassthroughCommandDecoderSupported() &&
      gpu_preferences_.use_passthrough_cmd_decoder;
  scoped_refptr<gl::GLShareGroup> share_group;
  if (use_passthrough_decoder) {
    share_group = new gl::GLShareGroup();
    // Virtualized contexts don't work with the passthrough command decoder.
    // See https://crbug.com/914976.
    use_virtualized_gl_contexts = false;
  } else {
    share_group = share_group_;
  }

  scoped_refptr<gl::GLContext> context =
      use_virtualized_gl_contexts ? share_group->shared_context() : nullptr;
  if (context && (!context->MakeCurrent(surface.get()) ||
                  context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) {
    context = nullptr;
  }
  if (!context) {
    ContextCreationAttribs attribs_helper;
    attribs_helper.context_type = features::UseGles2ForOopR()
                                      ? gpu::CONTEXT_TYPE_OPENGLES2
                                      : gpu::CONTEXT_TYPE_OPENGLES3;
    gl::GLContextAttribs attribs = gles2::GenerateGLContextAttribs(
        attribs_helper, use_passthrough_decoder);

    // Disable robust resource initialization for the raster decoder and
    // compositor.
    // TODO(crbug.com/1192632): disable robust_resource_initialization for
    // SwANGLE.
    if (gl::GLSurfaceEGL::GetGLDisplayEGL()->GetDisplayType() !=
        gl::ANGLE_SWIFTSHADER) {
      attribs.robust_resource_initialization = false;
    }

    attribs.can_skip_validation = !enable_angle_validation;

    context =
        gl::init::CreateGLContext(share_group.get(), surface.get(), attribs);
    if (!context && !features::UseGles2ForOopR()) {
      LOG(ERROR) << "Failed to create a GLES3 context; falling back to GLES2.";
      attribs.client_major_es_version = 2;
      attribs.client_minor_es_version = 0;
      context =
          gl::init::CreateGLContext(share_group.get(), surface.get(), attribs);
    }
    if (!context) {
      // TODO(piman): This might not be fatal; we could recurse into
      // CreateGLContext to get more info, though it should be exceedingly
      // rare and may not be recoverable anyway.
      LOG(ERROR) << "ContextResult::kFatalFailure: "
                    "Failed to create shared context for virtualization.";
      *result = ContextResult::kFatalFailure;
      return nullptr;
    }

    // Ensure that context creation did not lose track of the intended share
    // group.
    DCHECK(context->share_group() == share_group.get());
    gpu_feature_info_.ApplyToGLContext(context.get());

    if (use_virtualized_gl_contexts)
      share_group->SetSharedContext(context.get());
  }

  // This should be either:
  // (1) a non-virtual GL context, or
  // (2) a mock/stub context.
  DCHECK(context->GetHandle() ||
         gl::GetGLImplementation() == gl::kGLImplementationMockGL ||
         gl::GetGLImplementation() == gl::kGLImplementationStubGL);

  if (!context->MakeCurrent(surface.get())) {
    LOG(ERROR)
        << "ContextResult::kTransientFailure: failed to make context current.";
    *result = ContextResult::kTransientFailure;
    return nullptr;
  }

  // TODO(penghuang): https://crbug.com/899735 Handle device lost for Vulkan.
  auto shared_context_state = base::MakeRefCounted<SharedContextState>(
      std::move(share_group), std::move(surface), std::move(context),
      use_virtualized_gl_contexts,
      base::BindOnce(&GpuChannelManager::OnContextLost, base::Unretained(this)),
      gpu_preferences_.gr_context_type, vulkan_context_provider_,
      metal_context_provider_, dawn_context_provider_,
      peak_memory_monitor_.GetWeakPtr());

  // Initialize the GL context, so Vulkan and GL interop can work properly.
  auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
      gpu_driver_bug_workarounds(), gpu_feature_info());
  if (!shared_context_state->InitializeGL(gpu_preferences_,
                                          feature_info.get())) {
    LOG(ERROR) << "ContextResult::kFatalFailure: Failed to initialize GL for "
                  "SharedContextState.";
    *result = ContextResult::kFatalFailure;
    return nullptr;
  }

  // Log crash reports when GL errors are generated.
  if (gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE &&
      enable_angle_validation && feature_info->feature_flags().khr_debug) {
    // Limit the total number of GL error crash reports to 1 per GPU process.
    static int remaining_gl_error_reports = 1;
    gles2::InitializeGLDebugLogging(false, CrashReportOnGLErrorDebugCallback,
                                    &remaining_gl_error_reports);
  }

  if (!shared_context_state->InitializeGrContext(
          gpu_preferences_, gpu_driver_bug_workarounds_, gr_shader_cache(),
          &activity_flags_, watchdog_)) {
    LOG(ERROR) << "ContextResult::kFatalFailure: Failed to initialize "
                  "GrContext for SharedContextState.";
    *result = ContextResult::kFatalFailure;
    return nullptr;
  }

  shared_context_state_ = std::move(shared_context_state);

  *result = ContextResult::kSuccess;
  return shared_context_state_;
}

void GpuChannelManager::OnContextLost(bool synthetic_loss) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // Add crash keys for the context-lost count and times.
  static auto* const lost_count_crash_key = base::debug::AllocateCrashKeyString(
      "context-lost-count", base::debug::CrashKeySize::Size32);
  // Time since creation of the |GpuChannelManager| at which the context was
  // lost.
  static auto* const lost_time_crash_key = base::debug::AllocateCrashKeyString(
      "context-lost-time", base::debug::CrashKeySize::Size64);
  // Interval since the previous context-lost event.
  static auto* const lost_interval_crash_key =
      base::debug::AllocateCrashKeyString("context-lost-interval",
                                          base::debug::CrashKeySize::Size64);

  base::debug::SetCrashKeyString(
      lost_count_crash_key, base::StringPrintf("%d", ++context_lost_count_));

  auto lost_time = base::TimeTicks::Now() - creation_time_;
  SetCrashKeyTimeDelta(lost_time_crash_key, lost_time);

  if (!context_lost_time_.is_zero()) {
    auto interval = lost_time - context_lost_time_;
    SetCrashKeyTimeDelta(lost_interval_crash_key, interval);
  }
  context_lost_time_ = lost_time;
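
  // A synthetic loss on a GL GrContext does not indicate a real device loss,
  // so other contexts can keep running. In all other cases, assume the whole
  // device is affected and escalate below.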
  bool is_gl = gpu_preferences_.gr_context_type == GrContextType::kGL;
  if (synthetic_loss && is_gl)
    return;

  // Lose all other contexts.
  if (gl::GLContext::LosesAllContextsOnContextLost() ||
      (shared_context_state_ &&
       shared_context_state_->use_virtualized_gl_contexts())) {
    delegate_->LoseAllContexts();
  }

  // Work around issues with recovery by allowing a new GPU process to launch.
  if (gpu_driver_bug_workarounds_.exit_on_context_lost ||
      (shared_context_state_ && !shared_context_state_->GrContextIsGL())) {
    delegate_->MaybeExitOnContextLost();
  }
}

void GpuChannelManager::ScheduleGrContextCleanup() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  shared_context_state_->ScheduleGrContextCleanup();
}

void GpuChannelManager::StoreShader(const std::string& key,
                                    const std::string& shader) {
  delegate_->StoreBlobToDisk(kGrShaderGpuDiskCacheHandle, key, shader);
}

void GpuChannelManager::SetImageDecodeAcceleratorWorkerForTesting(
    ImageDecodeAcceleratorWorker* worker) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(gpu_channels_.empty());
  image_decode_accelerator_worker_ = worker;
}

}  // namespace gpu