in_process_command_buffer.cc

// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/ipc/in_process_command_buffer.h"

#include <stddef.h>
#include <stdint.h>

#include <set>
#include <utility>

#include "base/atomic_sequence_num.h"
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/numerics/safe_conversions.h"
#include "base/sequence_checker.h"
#include "base/synchronization/waitable_event.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "cc/base/completion_event.h"
#include "components/viz/common/features.h"
#include "gpu/command_buffer/client/gpu_control_client.h"
#include "gpu/command_buffer/client/shared_image_interface.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/command_buffer_task_executor.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h"
#include "gpu/command_buffer/service/gpu_fence_manager.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
#include "gpu/command_buffer/service/gr_shader_cache.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/mailbox_manager_factory.h"
#include "gpu/command_buffer/service/memory_program_cache.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/raster_decoder.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_interface_in_process.h"
#include "gpu/command_buffer/service/single_task_sequence.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/webgpu_decoder.h"
#include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_preferences.h"
#include "gpu/config/gpu_switches.h"
#include "gpu/ipc/common/gpu_client_ids.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_fence.h"
#include "ui/gfx/gpu_fence_handle.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_share_group.h"
#include "ui/gl/gl_surface_egl.h"
#include "ui/gl/gl_utils.h"
#include "ui/gl/gl_version_info.h"
#include "ui/gl/init/create_gr_gl_interface.h"
#include "ui/gl/init/gl_factory.h"

namespace gpu {

namespace {

template <typename T>
base::OnceClosure WrapTaskWithResult(base::OnceCallback<T(void)> task,
                                     T* result,
                                     base::WaitableEvent* completion) {
  auto wrapper = [](base::OnceCallback<T(void)> task, T* result,
                    base::WaitableEvent* completion) {
    *result = std::move(task).Run();
    completion->Signal();
  };
  return base::BindOnce(wrapper, std::move(task), result, completion);
}
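
// WrapTaskWithResult() pairs with a base::WaitableEvent to emulate a
// synchronous cross-thread call. A minimal usage sketch (hypothetical
// |DoWorkOnGpuThread|, mirroring how Initialize() and Destroy() below use it):
//
//   bool result = false;
//   base::WaitableEvent done(base::WaitableEvent::ResetPolicy::MANUAL,
//                            base::WaitableEvent::InitialState::NOT_SIGNALED);
//   task_sequence->ScheduleTask(
//       WrapTaskWithResult(base::BindOnce(&DoWorkOnGpuThread), &result, &done),
//       {});
//   done.Wait();  // |result| is valid once |done| has been signaled.

// Signals |event_| when destroyed, even on early-return paths.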
class ScopedEvent {
 public:
  explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {}
  ~ScopedEvent() { event_->Signal(); }

 private:
  raw_ptr<base::WaitableEvent> event_;
};

}  // namespace

void InProcessCommandBuffer::SetError() {
  // Signal errors by losing the command buffer.
  command_buffer_->SetParseError(error::kLostContext);
}

void InProcessCommandBuffer::WrapTaskWithGpuCheck(base::OnceClosure task) {
  RunTaskOnGpuThread(std::move(task));
}

InProcessCommandBuffer::InProcessCommandBuffer(
    CommandBufferTaskExecutor* task_executor,
    const GURL& active_url)
    : active_url_(active_url),
      flush_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                   base::WaitableEvent::InitialState::NOT_SIGNALED),
      task_executor_(task_executor),
      fence_sync_wait_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                             base::WaitableEvent::InitialState::NOT_SIGNALED) {
  // This binds the client sequence checker to the current sequence.
  DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
  // Detach the gpu sequence checker because we want to bind it to the gpu
  // sequence, and not the current (client) sequence, except for webview (see
  // Initialize).
  DETACH_FROM_SEQUENCE(gpu_sequence_checker_);
  DCHECK(task_executor_);
}

InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();
}

gpu::ServiceTransferCache* InProcessCommandBuffer::GetTransferCacheForTest()
    const {
  return static_cast<raster::RasterDecoder*>(decoder_.get())
      ->GetTransferCacheForTest();
}

int InProcessCommandBuffer::GetRasterDecoderIdForTest() const {
  return static_cast<raster::RasterDecoder*>(decoder_.get())
      ->DecoderIdForTest();
}

webgpu::WebGPUDecoder* InProcessCommandBuffer::GetWebGPUDecoderForTest() const {
  return static_cast<webgpu::WebGPUDecoder*>(decoder_.get());
}

gpu::SharedImageInterface* InProcessCommandBuffer::GetSharedImageInterface()
    const {
  return shared_image_interface_.get();
}

bool InProcessCommandBuffer::MakeCurrent() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
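  // |context_| stays null for decoders that do not own a GL context (e.g. the
  // WebGPU path in InitializeOnGpuThread() never sets it), so there is nothing
  // to make current.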
  if (!context_) {
    return true;
  }

  if (error::IsError(command_buffer_->GetState().error)) {
    DLOG(ERROR) << "MakeCurrent failed because context lost.";
    return false;
  }
  if (!decoder_->MakeCurrent()) {
    DLOG(ERROR) << "Context lost because MakeCurrent failed.";
    command_buffer_->SetParseError(error::kLostContext);
    return false;
  }
  return true;
}
absl::optional<gles2::ProgramCache::ScopedCacheUse>
InProcessCommandBuffer::CreateCacheUse() {
  absl::optional<gles2::ProgramCache::ScopedCacheUse> cache_use;
  if (context_group_->has_program_cache()) {
    cache_use.emplace(
        context_group_->get_program_cache(),
        base::BindRepeating(&DecoderClient::CacheBlob, base::Unretained(this),
                            gpu::GpuDiskCacheType::kGlShaders));
  }
  return cache_use;
}
gpu::ContextResult InProcessCommandBuffer::Initialize(
    const ContextCreationAttribs& attribs,
    ImageFactory* image_factory,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
    gpu::raster::GrShaderCache* gr_shader_cache,
    GpuProcessActivityFlags* activity_flags) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
  TRACE_EVENT0("gpu", "InProcessCommandBuffer::Initialize");
  DCHECK(task_runner);

  origin_task_runner_ = std::move(task_runner);
  client_thread_weak_ptr_ = client_thread_weak_ptr_factory_.GetWeakPtr();

  Capabilities capabilities;
  InitializeOnGpuThreadParams params(attribs, &capabilities, image_factory,
                                     gr_shader_cache, activity_flags);

  base::OnceCallback<gpu::ContextResult(void)> init_task =
      base::BindOnce(&InProcessCommandBuffer::InitializeOnGpuThread,
                     base::Unretained(this), params);

  task_scheduler_holder_ =
      std::make_unique<gpu::GpuTaskSchedulerHelper>(task_executor_);
  task_sequence_ = task_scheduler_holder_->GetTaskSequence();

  // Block on a WaitableEvent so that InitializeOnGpuThread() has finished
  // before Initialize() returns. This also guarantees we won't try to cache
  // the GLSurface before its creation is complete.
  base::WaitableEvent completion(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  gpu::ContextResult result = gpu::ContextResult::kSuccess;
  task_sequence_->ScheduleTask(
      WrapTaskWithResult(std::move(init_task), &result, &completion), {});
  completion.Wait();

  if (result == gpu::ContextResult::kSuccess) {
    capabilities_ = capabilities;
    shared_image_interface_ = std::make_unique<SharedImageInterfaceInProcess>(
        task_sequence_, gpu_dependency_.get(), this);
  }
  return result;
}
gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
    const InitializeOnGpuThreadParams& params) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  TRACE_EVENT0("gpu", "InProcessCommandBuffer::InitializeOnGpuThread");
  UpdateActiveUrl();

  gpu_dependency_ =
      std::make_unique<DisplayCompositorMemoryAndTaskControllerOnGpu>(
          task_executor_, params.image_factory);

  GpuDriverBugWorkarounds workarounds(
      task_executor_->gpu_feature_info().enabled_gpu_driver_bug_workarounds);

  std::unique_ptr<MemoryTracker> memory_tracker;
  // Android WebView won't have a memory tracker.
  if (task_executor_->ShouldCreateMemoryTracker()) {
    const uint64_t client_tracing_id =
        base::trace_event::MemoryDumpManager::GetInstance()
            ->GetTracingProcessId();
    memory_tracker = std::make_unique<GpuCommandBufferMemoryTracker>(
        gpu_dependency_->command_buffer_id(), client_tracing_id,
        base::ThreadTaskRunnerHandle::Get(), /*observer=*/nullptr);
  }

  auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
      workarounds, task_executor_->gpu_feature_info());
  context_group_ = base::MakeRefCounted<gles2::ContextGroup>(
      task_executor_->gpu_preferences(),
      gles2::PassthroughCommandDecoderSupported(),
      task_executor_->mailbox_manager(), std::move(memory_tracker),
      task_executor_->shader_translator_cache(),
      task_executor_->framebuffer_completeness_cache(), feature_info,
      params.attribs.bind_generates_resource, params.image_factory,
      nullptr /* progress_reporter */, task_executor_->gpu_feature_info(),
      task_executor_->discardable_manager(),
      task_executor_->passthrough_discardable_manager(),
      task_executor_->shared_image_manager());

#if BUILDFLAG(IS_MAC)
  // Virtualize GpuPreference::kLowPower contexts by default on OS X to prevent
  // performance regressions when enabling FCM. https://crbug.com/180463
  use_virtualized_gl_context_ |=
      (params.attribs.gpu_preference == gl::GpuPreference::kLowPower);
#endif
  use_virtualized_gl_context_ |= task_executor_->ForceVirtualizedGLContexts();

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;

  if (context_group_->use_passthrough_cmd_decoder()) {
    // Virtualized contexts don't work with the passthrough command decoder.
    // See https://crbug.com/914976
    use_virtualized_gl_context_ = false;
  }

  command_buffer_ = std::make_unique<CommandBufferService>(
      this, gpu_dependency_->memory_tracker());

  context_state_ = task_executor_->GetSharedContextState();
  if (context_state_) {
    surface_ = context_state_->surface();
  } else {
    // TODO(crbug.com/1247756): Is creating an offscreen GL surface still
    // needed?
    surface_ = gl::init::CreateOffscreenGLSurface(gl::GetDefaultDisplay(),
                                                  gfx::Size());
    if (!surface_.get()) {
      DestroyOnGpuThread();
      LOG(ERROR) << "ContextResult::kFatalFailure: Failed to create surface.";
      return gpu::ContextResult::kFatalFailure;
    }
  }

  sync_point_client_state_ =
      task_executor_->sync_point_manager()->CreateSyncPointClientState(
          GetNamespaceID(), GetCommandBufferID(),
          task_sequence_->GetSequenceId());

  if (context_group_->use_passthrough_cmd_decoder()) {
    // When using the passthrough command decoder, never share with other
    // contexts.
    gl_share_group_ = base::MakeRefCounted<gl::GLShareGroup>();
  } else {
    // When using the validating command decoder, always use the global share
    // group.
    gl_share_group_ = task_executor_->GetShareGroup();
  }

  if (params.attribs.context_type == CONTEXT_TYPE_WEBGPU) {
    if (!task_executor_->gpu_preferences().enable_webgpu) {
      DLOG(ERROR) << "ContextResult::kFatalFailure: WebGPU not enabled";
      return gpu::ContextResult::kFatalFailure;
    }
    std::unique_ptr<webgpu::WebGPUDecoder> webgpu_decoder(
        webgpu::WebGPUDecoder::Create(
            this, command_buffer_.get(), task_executor_->shared_image_manager(),
            gpu_dependency_->memory_tracker(), task_executor_->outputter(),
            task_executor_->gpu_preferences(), context_state_));
    gpu::ContextResult result =
        webgpu_decoder->Initialize(task_executor_->gpu_feature_info());
    if (result != gpu::ContextResult::kSuccess) {
      DestroyOnGpuThread();
      DLOG(ERROR) << "Failed to initialize WebGPU decoder.";
      return result;
    }
    decoder_ = std::move(webgpu_decoder);
  } else {
    // TODO(khushalsagar): A lot of this initialization code is duplicated in
    // GpuChannelManager. Pull it into a common util method.
    scoped_refptr<gl::GLContext> real_context =
        use_virtualized_gl_context_ ? gl_share_group_->shared_context()
                                    : nullptr;
    if (real_context &&
        (!real_context->MakeCurrent(surface_.get()) ||
         real_context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) {
      real_context = nullptr;
    }
    if (!real_context) {
      real_context = gl::init::CreateGLContext(
          gl_share_group_.get(), surface_.get(),
          GenerateGLContextAttribs(params.attribs, context_group_.get()));
      if (!real_context) {
        // TODO(piman): This might not be fatal. We could recurse into
        // CreateGLContext to get more info, though it should be exceedingly
        // rare and may not be recoverable anyway.
        DestroyOnGpuThread();
        LOG(ERROR) << "ContextResult::kFatalFailure: "
                      "Failed to create shared context for virtualization.";
        return gpu::ContextResult::kFatalFailure;
      }
      // Ensure that context creation did not lose track of the intended share
      // group.
      DCHECK(real_context->share_group() == gl_share_group_.get());
      task_executor_->gpu_feature_info().ApplyToGLContext(real_context.get());

      if (use_virtualized_gl_context_)
        gl_share_group_->SetSharedContext(real_context.get());
    }

    if (!real_context->MakeCurrent(surface_.get())) {
      LOG(ERROR)
          << "ContextResult::kTransientFailure, failed to make context current";
      DestroyOnGpuThread();
      return ContextResult::kTransientFailure;
    }

    if (params.attribs.enable_raster_interface &&
        !params.attribs.enable_gles2_interface) {
      gr_shader_cache_ = params.gr_shader_cache;
      if (!context_state_ ||
          !context_state_->MakeCurrent(nullptr, /*needs_gl=*/true)) {
        DestroyOnGpuThread();
        LOG(ERROR) << "Failed to make context current.";
        return ContextResult::kTransientFailure;
      }
      // TODO(penghuang): Merge all SharedContextState::Initialize*()
      if (!context_state_->IsGLInitialized()) {
        context_state_->InitializeGL(task_executor_->gpu_preferences(),
                                     context_group_->feature_info());
      }
      context_ = context_state_->context();
      decoder_.reset(raster::RasterDecoder::Create(
          this, command_buffer_.get(), task_executor_->outputter(),
          task_executor_->gpu_feature_info(), task_executor_->gpu_preferences(),
          gpu_dependency_->memory_tracker(),
          task_executor_->shared_image_manager(), params.image_factory,
          context_state_, true /*is_privileged*/));
    } else {
      decoder_.reset(gles2::GLES2Decoder::Create(this, command_buffer_.get(),
                                                 task_executor_->outputter(),
                                                 context_group_.get()));
      if (use_virtualized_gl_context_) {
        context_ = base::MakeRefCounted<GLContextVirtual>(
            gl_share_group_.get(), real_context.get(), decoder_->AsWeakPtr());
        if (!context_->Initialize(surface_.get(),
                                  GenerateGLContextAttribs(
                                      params.attribs, context_group_.get()))) {
          // TODO(piman): This might not be fatal. We could recurse into
          // CreateGLContext to get more info, though it should be exceedingly
          // rare and may not be recoverable anyway.
          DestroyOnGpuThread();
          LOG(ERROR) << "ContextResult::kFatalFailure: "
                        "Failed to initialize virtual GL context.";
          return gpu::ContextResult::kFatalFailure;
        }
        if (!context_->MakeCurrent(surface_.get())) {
          DestroyOnGpuThread();
          // The caller should retry making a context, but this one won't work.
          LOG(ERROR) << "ContextResult::kTransientFailure: "
                        "Could not make context current.";
          return gpu::ContextResult::kTransientFailure;
        }
      } else {
        context_ = real_context;
        DCHECK(context_->IsCurrent(surface_.get()));
      }
    }

    if (!context_group_->has_program_cache() &&
        !context_group_->feature_info()->workarounds().disable_program_cache) {
      context_group_->set_program_cache(task_executor_->program_cache());
    }
  }

  gles2::DisallowedFeatures disallowed_features;
  auto result = decoder_->Initialize(surface_, context_, /*offscreen=*/true,
                                     disallowed_features, params.attribs);
  if (result != gpu::ContextResult::kSuccess) {
    DestroyOnGpuThread();
    DLOG(ERROR) << "Failed to initialize decoder.";
    return result;
  }

  if (task_executor_->gpu_preferences().enable_gpu_service_logging)
    decoder_->SetLogCommands(true);

  if (context_ && use_virtualized_gl_context_) {
    // If virtualized GL contexts are in use, then real GL context state
    // is in an indeterminate state, since the GLStateRestorer was not
    // initialized at the time the GLContextVirtual was made current. In
    // the case that this command decoder is the next one to be
    // processed, force a "full virtual" MakeCurrent to be performed.
    context_->ForceReleaseVirtuallyCurrent();
    if (!context_->MakeCurrent(surface_.get())) {
      DestroyOnGpuThread();
      LOG(ERROR) << "ContextResult::kTransientFailure: "
                    "Failed to make context current after initialization.";
      return gpu::ContextResult::kTransientFailure;
    }
  }

  *params.capabilities = decoder_->GetCapabilities();
  return gpu::ContextResult::kSuccess;
}
void InProcessCommandBuffer::Destroy() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
  TRACE_EVENT0("gpu", "InProcessCommandBuffer::Destroy");

  client_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  gpu_control_client_ = nullptr;
  shared_image_interface_ = nullptr;

  // Block on a WaitableEvent so that DestroyOnGpuThread() has finished before
  // Destroy() returns.
  base::WaitableEvent completion(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  bool result = false;
  base::OnceCallback<bool(void)> destroy_task = base::BindOnce(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  task_sequence_->ScheduleTask(
      WrapTaskWithResult(std::move(destroy_task), &result, &completion), {});
  completion.Wait();

  task_sequence_ = nullptr;
}
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  TRACE_EVENT0("gpu", "InProcessCommandBuffer::DestroyOnGpuThread");
  UpdateActiveUrl();

  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  // Clean up GL resources if possible.
  bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
  absl::optional<gles2::ProgramCache::ScopedCacheUse> cache_use;
  if (have_context)
    cache_use = CreateCacheUse();

  // Prepare to destroy the surface while the context is still current, because
  // some surface destructors make GL calls.
  if (surface_)
    surface_->PrepareToDestroy(have_context);

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  command_buffer_.reset();
  surface_ = nullptr;

  context_ = nullptr;
  if (sync_point_client_state_) {
    sync_point_client_state_->Destroy();
    sync_point_client_state_ = nullptr;
  }
  gl_share_group_ = nullptr;
  context_group_ = nullptr;
  if (context_state_)
    context_state_->MakeCurrent(nullptr);
  context_state_ = nullptr;
  gpu_dependency_.reset();

  return true;
}
CommandBufferServiceClient::CommandBatchProcessedResult
InProcessCommandBuffer::OnCommandBatchProcessed() {
  return task_sequence_->ShouldYield() ? kPauseExecution : kContinueExecution;
}

void InProcessCommandBuffer::OnParseError() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);

  // There is a race between the service side FlushOnGpuThread() calling
  // UpdateLastStateOnGpuThread() and the client side calling GetLastState().
  // Update |last_state_| now, before notifying the client, so the error is
  // captured and the race is benign.
  UpdateLastStateOnGpuThread();

  PostOrRunClientCallback(base::BindOnce(&InProcessCommandBuffer::OnContextLost,
                                         client_thread_weak_ptr_));
}

void InProcessCommandBuffer::OnContextLost() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);

#if DCHECK_IS_ON()
  // This method shouldn't be called more than once.
  DCHECK(!context_lost_);
  context_lost_ = true;
#endif

  if (gpu_control_client_)
    gpu_control_client_->OnGpuControlLostContext();
}

void InProcessCommandBuffer::RunTaskOnGpuThread(base::OnceClosure task) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  UpdateActiveUrl();
  std::move(task).Run();
}

void InProcessCommandBuffer::ScheduleGpuTask(
    base::OnceClosure task,
    std::vector<SyncToken> sync_token_fences,
    SingleTaskSequence::ReportingCallback report_callback) {
  base::OnceClosure gpu_task = base::BindOnce(
      &InProcessCommandBuffer::RunTaskOnGpuThread,
      gpu_thread_weak_ptr_factory_.GetWeakPtr(), std::move(task));
  task_sequence_->ScheduleTask(std::move(gpu_task),
                               std::move(sync_token_fences),
                               std::move(report_callback));
}

void InProcessCommandBuffer::ContinueGpuTask(base::OnceClosure task) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  base::OnceClosure gpu_task = base::BindOnce(
      &InProcessCommandBuffer::RunTaskOnGpuThread,
      gpu_thread_weak_ptr_factory_.GetWeakPtr(), std::move(task));
  task_sequence_->ContinueTask(std::move(gpu_task));
}

CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  base::AutoLock lock(last_state_lock_);
  return last_state_;
}

void InProcessCommandBuffer::UpdateLastStateOnGpuThread() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  base::AutoLock lock(last_state_lock_);
  command_buffer_->UpdateState();
  State state = command_buffer_->GetState();
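  // |generation| may wrap around; the unsigned subtraction below accepts
  // |state| as newer only if it is less than 2^31 generations ahead, which
  // keeps the comparison correct across wraparound.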
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

bool InProcessCommandBuffer::HasUnprocessedCommandsOnGpuThread() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  if (command_buffer_) {
    CommandBuffer::State state = command_buffer_->GetState();
    return command_buffer_->put_offset() != state.get_offset &&
           !error::IsError(state.error);
  }
  return false;
}

void InProcessCommandBuffer::FlushOnGpuThread(
    int32_t put_offset,
    const std::vector<SyncToken>& sync_token_fences) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  TRACE_EVENT1("gpu", "InProcessCommandBuffer::FlushOnGpuThread", "put_offset",
               put_offset);
  ScopedEvent handle_flush(&flush_event_);

  // Check that all sync token waits are invalid or already complete. Do not
  // use SyncPointManager::IsSyncTokenReleased() because it cannot distinguish
  // an invalid wait from an unreleased one.
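  // Wait() returns true only when it queues a callback for a pending fence;
  // by the time this task runs the scheduler has already waited on every fence
  // passed to ScheduleTask(), so each Wait() below should return false. (The
  // DCHECK argument is not evaluated in non-DCHECK builds.)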
  for (const auto& sync_token : sync_token_fences)
    DCHECK(!sync_point_client_state_->Wait(sync_token, base::DoNothing()));

  if (!MakeCurrent())
    return;
  auto cache_use = CreateCacheUse();

  {
    absl::optional<raster::GrShaderCache::ScopedCacheUse> gr_cache_use;
    if (gr_shader_cache_)
      gr_cache_use.emplace(gr_shader_cache_, kDisplayCompositorClientId);
    command_buffer_->Flush(put_offset, decoder_.get());
  }
  // Update state before signaling the flush event.
  UpdateLastStateOnGpuThread();

  bool has_unprocessed_commands = HasUnprocessedCommandsOnGpuThread();

  if (!command_buffer_->scheduled() || has_unprocessed_commands) {
    ContinueGpuTask(base::BindOnce(&InProcessCommandBuffer::FlushOnGpuThread,
                                   gpu_thread_weak_ptr_factory_.GetWeakPtr(),
                                   put_offset, sync_token_fences));
  }

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (!has_unprocessed_commands &&
      (decoder_->HasMoreIdleWork() || decoder_->HasPendingQueries())) {
    ScheduleDelayedWorkOnGpuThread();
  }
}
void InProcessCommandBuffer::PerformDelayedWorkOnGpuThread() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  delayed_work_pending_ = false;

  if (MakeCurrent()) {
    auto cache_use = CreateCacheUse();
    decoder_->PerformIdleWork();
    decoder_->ProcessPendingQueries(false);
    if (decoder_->HasMoreIdleWork() || decoder_->HasPendingQueries()) {
      ScheduleDelayedWorkOnGpuThread();
    }
  }
}

void InProcessCommandBuffer::ScheduleDelayedWorkOnGpuThread() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  if (delayed_work_pending_)
    return;
  delayed_work_pending_ = true;
  task_executor_->ScheduleDelayedWork(
      base::BindOnce(&InProcessCommandBuffer::PerformDelayedWorkOnGpuThread,
                     gpu_thread_weak_ptr_factory_.GetWeakPtr()));
}
void InProcessCommandBuffer::Flush(int32_t put_offset) {
  if (GetLastState().error != error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  TRACE_EVENT1("gpu", "InProcessCommandBuffer::Flush", "put_offset",
               put_offset);

  last_put_offset_ = put_offset;

  std::vector<SyncToken> sync_token_fences;
  next_flush_sync_token_fences_.swap(sync_token_fences);

  // Don't use std::move() for |sync_token_fences| because evaluation order for
  // arguments is not defined.
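  // The fences are intentionally passed twice: the copy given to
  // ScheduleGpuTask() lets the scheduler defer this task until every fence is
  // released, while the copy bound into the closure lets FlushOnGpuThread()
  // re-validate them when it runs.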
  ScheduleGpuTask(base::BindOnce(&InProcessCommandBuffer::FlushOnGpuThread,
                                 gpu_thread_weak_ptr_factory_.GetWeakPtr(),
                                 put_offset, sync_token_fences),
                  sync_token_fences);
}
void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) {
  Flush(put_offset);
}

CommandBuffer::State InProcessCommandBuffer::WaitForTokenInRange(int32_t start,
                                                                 int32_t end) {
  TRACE_EVENT2("gpu", "InProcessCommandBuffer::WaitForTokenInRange", "start",
               start, "end", end);
  State last_state = GetLastState();
  while (!InRange(start, end, last_state.token) &&
         last_state.error == error::kNoError) {
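    // |flush_event_| is auto-reset and is signaled at the end of every
    // FlushOnGpuThread() (via ScopedEvent), so each wakeup re-reads the
    // freshest state.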
    flush_event_.Wait();
    last_state = GetLastState();
  }
  return last_state;
}

CommandBuffer::State InProcessCommandBuffer::WaitForGetOffsetInRange(
    uint32_t set_get_buffer_count,
    int32_t start,
    int32_t end) {
  TRACE_EVENT2("gpu", "InProcessCommandBuffer::WaitForGetOffsetInRange",
               "start", start, "end", end);
  State last_state = GetLastState();
  while (((set_get_buffer_count != last_state.set_get_buffer_count) ||
          !InRange(start, end, last_state.get_offset)) &&
         last_state.error == error::kNoError) {
    flush_event_.Wait();
    last_state = GetLastState();
  }
  return last_state;
}

void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) {
  if (GetLastState().error != error::kNoError)
    return;

  base::WaitableEvent completion(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  ScheduleGpuTask(base::BindOnce(
      &InProcessCommandBuffer::SetGetBufferOnGpuThread,
      gpu_thread_weak_ptr_factory_.GetWeakPtr(), shm_id, &completion));
  completion.Wait();

  last_put_offset_ = 0;
}

void InProcessCommandBuffer::SetGetBufferOnGpuThread(
    int32_t shm_id,
    base::WaitableEvent* completion) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  command_buffer_->SetGetBuffer(shm_id);
  UpdateLastStateOnGpuThread();
  completion->Signal();
}

scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(
    uint32_t size,
    int32_t* id,
    TransferBufferAllocationOption option) {
  scoped_refptr<Buffer> buffer = MakeMemoryBuffer(size);
  *id = GetNextBufferId();
  ScheduleGpuTask(
      base::BindOnce(&InProcessCommandBuffer::RegisterTransferBufferOnGpuThread,
                     gpu_thread_weak_ptr_factory_.GetWeakPtr(), *id, buffer));
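  // The client may use the buffer immediately: registration runs on the GPU
  // sequence ahead of any later flush that references |*id|, because both are
  // scheduled on the same |task_sequence_|.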
  return buffer;
}
void InProcessCommandBuffer::RegisterTransferBufferOnGpuThread(
    int32_t id,
    scoped_refptr<Buffer> buffer) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  command_buffer_->RegisterTransferBuffer(id, std::move(buffer));
}

void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) {
  ScheduleGpuTask(
      base::BindOnce(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
                     gpu_thread_weak_ptr_factory_.GetWeakPtr(), id));
}

void InProcessCommandBuffer::ForceLostContext(error::ContextLostReason reason) {
  ScheduleGpuTask(
      base::BindOnce(&InProcessCommandBuffer::ForceLostContextOnGpuThread,
                     gpu_thread_weak_ptr_factory_.GetWeakPtr(), reason));
}

void InProcessCommandBuffer::ForceLostContextOnGpuThread(
    error::ContextLostReason reason) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  // Similar implementation to CommandBufferDirect.
  command_buffer_->SetContextLostReason(reason);
  command_buffer_->SetParseError(error::kLostContext);
}

void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  command_buffer_->DestroyTransferBuffer(id);
}

void InProcessCommandBuffer::SetGpuControlClient(GpuControlClient* client) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
  gpu_control_client_ = client;
}

const Capabilities& InProcessCommandBuffer::GetCapabilities() const {
  return capabilities_;
}

const GpuFeatureInfo& InProcessCommandBuffer::GetGpuFeatureInfo() const {
  return task_executor_->gpu_feature_info();
}

void InProcessCommandBuffer::OnConsoleMessage(int32_t id,
                                              const std::string& message) {
  // TODO(piman): implement this.
}

void InProcessCommandBuffer::CacheBlob(gpu::GpuDiskCacheType type,
                                       const std::string& key,
                                       const std::string& shader) {}

void InProcessCommandBuffer::OnFenceSyncRelease(uint64_t release) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);

  SyncToken sync_token(GetNamespaceID(), GetCommandBufferID(), release);

  command_buffer_->SetReleaseCount(release);
  sync_point_client_state_->ReleaseFenceSync(release);
}

void InProcessCommandBuffer::OnDescheduleUntilFinished() {
  NOTREACHED();
}

void InProcessCommandBuffer::OnRescheduleAfterFinished() {
  NOTREACHED();
}

void InProcessCommandBuffer::OnSwapBuffers(uint64_t swap_id, uint32_t flags) {
  NOTREACHED();
}

void InProcessCommandBuffer::ScheduleGrContextCleanup() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  context_state_->ScheduleGrContextCleanup();
}

void InProcessCommandBuffer::HandleReturnData(base::span<const uint8_t> data) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  std::vector<uint8_t> vec(data.data(), data.data() + data.size());
  PostOrRunClientCallback(
      base::BindOnce(&InProcessCommandBuffer::HandleReturnDataOnOriginThread,
                     client_thread_weak_ptr_, std::move(vec)));
}

void InProcessCommandBuffer::PostOrRunClientCallback(
    base::OnceClosure callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  if (!origin_task_runner_) {
    task_executor_->PostNonNestableToClient(std::move(callback));
    return;
  }
  origin_task_runner_->PostTask(FROM_HERE, std::move(callback));
}

base::OnceClosure InProcessCommandBuffer::WrapClientCallback(
    base::OnceClosure callback) {
  return base::BindOnce(&InProcessCommandBuffer::PostOrRunClientCallback,
                        gpu_thread_weak_ptr_factory_.GetWeakPtr(),
                        std::move(callback));
}

void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token,
                                             base::OnceClosure callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
  ScheduleGpuTask(
      base::BindOnce(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread,
                     gpu_thread_weak_ptr_factory_.GetWeakPtr(), sync_token,
                     std::move(callback)));
}

void InProcessCommandBuffer::SignalSyncTokenOnGpuThread(
    const SyncToken& sync_token,
    base::OnceClosure callback) {
  auto callback_pair =
      base::SplitOnceCallback(WrapClientCallback(std::move(callback)));
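  // Wait() consumes the first callback only when it registers a pending wait;
  // if the token is already released (or invalid) it returns false without
  // taking ownership, so run the second copy immediately.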
  if (!sync_point_client_state_->Wait(sync_token,
                                      std::move(callback_pair.first))) {
    std::move(callback_pair.second).Run();
  }
}
void InProcessCommandBuffer::SignalQuery(unsigned query_id,
                                         base::OnceClosure callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
  ScheduleGpuTask(
      base::BindOnce(&InProcessCommandBuffer::SignalQueryOnGpuThread,
                     gpu_thread_weak_ptr_factory_.GetWeakPtr(), query_id,
                     std::move(callback)));
}

void InProcessCommandBuffer::SignalQueryOnGpuThread(
    unsigned query_id,
    base::OnceClosure callback) {
  decoder_->SetQueryCallback(query_id, WrapClientCallback(std::move(callback)));
}

void InProcessCommandBuffer::CreateGpuFence(uint32_t gpu_fence_id,
                                            ClientGpuFence source) {
  // Pass a cloned handle to the GPU process since the source ClientGpuFence
  // may go out of scope before the queued task runs.
  gfx::GpuFence* gpu_fence = gfx::GpuFence::FromClientGpuFence(source);
  gfx::GpuFenceHandle handle = gpu_fence->GetGpuFenceHandle().Clone();
  ScheduleGpuTask(
      base::BindOnce(&InProcessCommandBuffer::CreateGpuFenceOnGpuThread,
                     gpu_thread_weak_ptr_factory_.GetWeakPtr(), gpu_fence_id,
                     std::move(handle)));
}

void InProcessCommandBuffer::CreateGpuFenceOnGpuThread(
    uint32_t gpu_fence_id,
    gfx::GpuFenceHandle handle) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  UpdateActiveUrl();
  if (!GetFeatureInfo()->feature_flags().chromium_gpu_fence) {
    DLOG(ERROR) << "CHROMIUM_gpu_fence unavailable";
    command_buffer_->SetParseError(error::kLostContext);
    return;
  }

  gles2::GpuFenceManager* gpu_fence_manager = decoder_->GetGpuFenceManager();
  DCHECK(gpu_fence_manager);

  if (gpu_fence_manager->CreateGpuFenceFromHandle(gpu_fence_id,
                                                  std::move(handle)))
    return;

  // The insertion failed. This shouldn't happen; force context loss to avoid
  // inconsistent state.
  command_buffer_->SetParseError(error::kLostContext);
}

void InProcessCommandBuffer::GetGpuFence(
    uint32_t gpu_fence_id,
    base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)> callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
  ScheduleGpuTask(
      base::BindOnce(&InProcessCommandBuffer::GetGpuFenceOnGpuThread,
                     gpu_thread_weak_ptr_factory_.GetWeakPtr(), gpu_fence_id,
                     std::move(callback)));
}

void InProcessCommandBuffer::GetGpuFenceOnGpuThread(
    uint32_t gpu_fence_id,
    base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)> callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  if (!GetFeatureInfo()->feature_flags().chromium_gpu_fence) {
    DLOG(ERROR) << "CHROMIUM_gpu_fence unavailable";
    command_buffer_->SetParseError(error::kLostContext);
    return;
  }

  gles2::GpuFenceManager* manager = decoder_->GetGpuFenceManager();
  DCHECK(manager);

  std::unique_ptr<gfx::GpuFence> gpu_fence;
  if (manager->IsValidGpuFence(gpu_fence_id)) {
    gpu_fence = manager->GetGpuFence(gpu_fence_id);
  } else {
    // Retrieval failed. This shouldn't happen; force context loss to avoid
    // inconsistent state.
    DLOG(ERROR) << "GpuFence not found";
    command_buffer_->SetParseError(error::kLostContext);
  }
  PostOrRunClientCallback(
      base::BindOnce(std::move(callback), std::move(gpu_fence)));
}

void InProcessCommandBuffer::SetLock(base::Lock*) {
  // No support for using on multiple threads.
  NOTREACHED();
}

void InProcessCommandBuffer::EnsureWorkVisible() {
  // This is only relevant for out-of-process command buffers.
}

CommandBufferNamespace InProcessCommandBuffer::GetNamespaceID() const {
  return CommandBufferNamespace::IN_PROCESS;
}

CommandBufferId InProcessCommandBuffer::GetCommandBufferID() const {
  return gpu_dependency_->command_buffer_id();
}

void InProcessCommandBuffer::FlushPendingWork() {
  // This is only relevant for out-of-process command buffers.
}

uint64_t InProcessCommandBuffer::GenerateFenceSyncRelease() {
  return next_fence_sync_release_++;
}
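
// Fence sync release counts increase monotonically: GenerateFenceSyncRelease()
// hands out the next count on the client thread, OnFenceSyncRelease() above
// publishes it from the GPU thread via SetReleaseCount(), and the comparison
// below checks it against the last state observed by the client.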
bool InProcessCommandBuffer::IsFenceSyncReleased(uint64_t release) {
  return release <= GetLastState().release_count;
}
void InProcessCommandBuffer::WaitSyncToken(const SyncToken& sync_token) {
  next_flush_sync_token_fences_.push_back(sync_token);
}
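
// In-process sync tokens never cross a trust boundary, so tokens from our own
// namespace can be waited on without the verification step that cross-process
// tokens require.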
bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken(
    const SyncToken& sync_token) {
  return sync_token.namespace_id() == GetNamespaceID();
}
const gles2::FeatureInfo* InProcessCommandBuffer::GetFeatureInfo() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
  return context_group_->feature_info();
}

void InProcessCommandBuffer::HandleReturnDataOnOriginThread(
    std::vector<uint8_t> data) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
  if (gpu_control_client_) {
    gpu_control_client_->OnGpuControlReturnData(data);
  }
}

void InProcessCommandBuffer::UpdateActiveUrl() {
  if (!active_url_.is_empty())
    ContextUrl::SetActiveUrl(active_url_);
}

}  // namespace gpu