image_decode_accelerator_stub.cc

// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/ipc/service/image_decode_accelerator_stub.h"

#include <stddef.h>

#include <algorithm>
#include <new>
#include <utility>
#include <vector>

#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/containers/span.h"
#include "base/feature_list.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/numerics/checked_math.h"
#include "base/numerics/safe_conversions.h"
#include "base/task/single_thread_task_runner.h"
#include "build/build_config.h"
#include "build/chromeos_buildflags.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/context_result.h"
#include "gpu/command_buffer/common/discardable_handle.h"
#include "gpu/command_buffer/common/scheduling_priority.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/decoder_context.h"
#include "gpu/command_buffer/service/gr_shader_cache.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/service_transfer_cache.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/config/gpu_finch_features.h"
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/surface_handle.h"
#include "gpu/ipc/service/command_buffer_stub.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "third_party/skia/include/core/SkColorSpace.h"
#include "third_party/skia/include/core/SkImage.h"
#include "third_party/skia/include/core/SkImageInfo.h"
#include "third_party/skia/include/core/SkRefCnt.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/GrTypes.h"
#include "third_party/skia/include/gpu/gl/GrGLTypes.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/buffer_types.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_image.h"

#if BUILDFLAG(IS_CHROMEOS_ASH)
#include "ui/gfx/linux/native_pixmap_dmabuf.h"
#include "ui/gl/gl_image_native_pixmap.h"
#endif
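
// ImageDecodeAcceleratorStub lives on the GPU-process side of a GpuChannel:
// it receives image decode requests, forwards them to an
// ImageDecodeAcceleratorWorker, and, when a decode completes, wraps the
// decoded planes as SkImages and inserts them into the transfer cache for use
// by the raster decoder.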
namespace gpu {
class Buffer;

#if BUILDFLAG(IS_CHROMEOS_ASH)
namespace {
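// Everything needed to release a plane's resources once Skia is done with its
// SkImage: the texture backing the plane, the GLImage bound to it, and the
// context state and task runner required to delete the texture safely.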
struct CleanUpContext {
  scoped_refptr<base::SingleThreadTaskRunner> main_task_runner;
  SharedContextState* shared_context_state = nullptr;
  scoped_refptr<gl::GLImage> gl_image;
  GLuint texture = 0;
};
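
// Release callback for the plane SkImages created below (also invoked via a
// ScopedClosureRunner on early-return paths). It must run on the main thread.
// The texture is deleted only if the shared context is still current; if the
// context was lost, the GL resources are already invalid.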
void CleanUpResource(SkImage::ReleaseContext context) {
  auto* clean_up_context = static_cast<CleanUpContext*>(context);
  DCHECK(clean_up_context->main_task_runner->BelongsToCurrentThread());
  if (clean_up_context->shared_context_state->IsCurrent(
          nullptr /* surface */)) {
    DCHECK(!clean_up_context->shared_context_state->context_lost());
    glDeleteTextures(1u, &clean_up_context->texture);
  } else {
    DCHECK(clean_up_context->shared_context_state->context_lost());
  }
  // The GLImage is destroyed here (it should be destroyed regardless of
  // whether the context is lost or current).
  delete clean_up_context;
}
}  // namespace
#endif

ImageDecodeAcceleratorStub::ImageDecodeAcceleratorStub(
    ImageDecodeAcceleratorWorker* worker,
    GpuChannel* channel,
    int32_t route_id)
    : worker_(worker),
      channel_(channel),
      sequence_(channel->scheduler()->CreateSequence(SchedulingPriority::kLow,
                                                     channel->task_runner())),
      sync_point_client_state_(
          channel->sync_point_manager()->CreateSyncPointClientState(
              CommandBufferNamespace::GPU_IO,
              CommandBufferIdFromChannelAndRoute(channel->client_id(),
                                                 route_id),
              sequence_)),
      main_task_runner_(channel->task_runner()),
      io_task_runner_(channel->io_task_runner()) {
  // We need the sequence to be initially disabled so that when we schedule a
  // task to release the decode sync token, it doesn't run immediately (we
  // want it to run when the decode is done).
  channel_->scheduler()->DisableSequence(sequence_);
}

void ImageDecodeAcceleratorStub::Shutdown() {
  DCHECK(main_task_runner_->BelongsToCurrentThread());
  base::AutoLock lock(lock_);
  sync_point_client_state_->Destroy();
  channel_->scheduler()->DestroySequence(sequence_);
  channel_ = nullptr;
}

void ImageDecodeAcceleratorStub::SetImageFactoryForTesting(
    ImageFactory* image_factory) {
  external_image_factory_for_testing_ = image_factory;
}

ImageDecodeAcceleratorStub::~ImageDecodeAcceleratorStub() {
  DCHECK(!channel_);
}

void ImageDecodeAcceleratorStub::ScheduleImageDecode(
    mojom::ScheduleImageDecodeParamsPtr params,
    uint64_t release_count) {
  DCHECK(io_task_runner_->BelongsToCurrentThread());
  if (!base::FeatureList::IsEnabled(
          features::kVaapiJpegImageDecodeAcceleration) &&
      !base::FeatureList::IsEnabled(
          features::kVaapiWebPImageDecodeAcceleration)) {
    return;
  }
  base::AutoLock lock(lock_);
  if (!channel_) {
    // The channel is no longer available, so don't do anything.
    return;
  }
  mojom::ScheduleImageDecodeParams& decode_params = *params;

  // Start the actual decode.
  worker_->Decode(
      std::move(decode_params.encoded_data), decode_params.output_size,
      base::BindOnce(&ImageDecodeAcceleratorStub::OnDecodeCompleted,
                     base::WrapRefCounted(this), decode_params.output_size));

  // Schedule a task to eventually release the decode sync token. Note that
  // this task won't run until the sequence is re-enabled when a decode
  // completes.
  const SyncToken discardable_handle_sync_token = SyncToken(
      CommandBufferNamespace::GPU_IO,
      CommandBufferIdFromChannelAndRoute(channel_->client_id(),
                                         decode_params.raster_decoder_route_id),
      decode_params.discardable_handle_release_count);
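  // The fence on |discardable_handle_sync_token| keeps
  // ProcessCompletedDecode() from running before the raster decoder has
  // reached the discardable handle's release count, i.e., before the
  // service-side discardable handle that the transfer cache entry needs can
  // be found and validated.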
  channel_->scheduler()->ScheduleTask(Scheduler::Task(
      sequence_,
      base::BindOnce(&ImageDecodeAcceleratorStub::ProcessCompletedDecode,
                     base::WrapRefCounted(this), std::move(params),
                     release_count),
      {discardable_handle_sync_token} /* sync_token_fences */));
}
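
// A decode proceeds as follows: the worker calls OnDecodeCompleted() when the
// decode finishes, which queues the result and enables |sequence_|; the
// scheduler then runs ProcessCompletedDecode() on the main thread to create
// the transfer cache entry; finally, FinishCompletedDecode() releases the
// decode sync token and disables the sequence again once no completed decodes
// remain.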
void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
    mojom::ScheduleImageDecodeParamsPtr params_ptr,
    uint64_t decode_release_count) {
  DCHECK(main_task_runner_->BelongsToCurrentThread());
  base::AutoLock lock(lock_);
  if (!channel_) {
    // The channel is no longer available, so don't do anything.
    return;
  }
  mojom::ScheduleImageDecodeParams& params = *params_ptr;

  DCHECK(!pending_completed_decodes_.empty());
  std::unique_ptr<ImageDecodeAcceleratorWorker::DecodeResult> completed_decode =
      std::move(pending_completed_decodes_.front());
  pending_completed_decodes_.pop();

  // Regardless of what happens next, make sure the sync token gets released
  // and the sequence gets disabled if there are no more completed decodes
  // after this. base::Unretained(this) is safe because *this outlives the
  // ScopedClosureRunner.
  base::ScopedClosureRunner finalizer(
      base::BindOnce(&ImageDecodeAcceleratorStub::FinishCompletedDecode,
                     base::Unretained(this), decode_release_count));

  if (!completed_decode) {
    DLOG(ERROR) << "The image could not be decoded";
    return;
  }

  // TODO(crbug.com/995883): the output_size parameter is going away, so this
  // validation is not needed. Checking if the size is too small should happen
  // at the level of the decoder (since that's the component that's aware of
  // its own capabilities).
  if (params.output_size.IsEmpty()) {
    DLOG(ERROR) << "Output dimensions are too small";
    return;
  }

  // Gain access to the transfer cache through the GpuChannelManager's
  // SharedContextState. We will also use that to get a GrContext that will be
  // used for Skia operations.
  ContextResult context_result;
  scoped_refptr<SharedContextState> shared_context_state =
      channel_->gpu_channel_manager()->GetSharedContextState(&context_result);
  if (context_result != ContextResult::kSuccess) {
    DLOG(ERROR) << "Unable to obtain the SharedContextState";
    return;
  }
  DCHECK(shared_context_state);

  // TODO(andrescj): in addition to this check, we should not advertise
  // support for hardware decode acceleration if we're not using GL (until we
  // support other graphics APIs).
  if (!shared_context_state->IsGLInitialized()) {
    DLOG(ERROR) << "GL has not been initialized";
    return;
  }
  if (!shared_context_state->gr_context()) {
    DLOG(ERROR) << "Could not get the GrContext";
    return;
  }
  if (!shared_context_state->MakeCurrent(nullptr /* surface */)) {
    DLOG(ERROR) << "Could not MakeCurrent the shared context";
    return;
  }

  std::vector<sk_sp<SkImage>> plane_sk_images;
  absl::optional<base::ScopedClosureRunner> notify_gl_state_changed;
#if BUILDFLAG(IS_CHROMEOS_ASH)
  // Right now, we only support YUV 4:2:0 for the output of the decoder
  // (either as YV12 or NV12).
  //
  // TODO(andrescj): change to gfx::BufferFormat::YUV_420 once
  // https://crrev.com/c/1573718 lands.
  DCHECK(completed_decode->buffer_format == gfx::BufferFormat::YVU_420 ||
         completed_decode->buffer_format ==
             gfx::BufferFormat::YUV_420_BIPLANAR);
  DCHECK_EQ(
      gfx::NumberOfPlanesForLinearBufferFormat(completed_decode->buffer_format),
      completed_decode->handle.native_pixmap_handle.planes.size());

  // Calculate the dimensions of each of the planes.
  const gfx::Size y_plane_size = completed_decode->visible_size;
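  // For 4:2:0, each chroma plane is half the luma plane's size in each
  // dimension, rounded up. For example, a 1280x720 image has 640x360 chroma
  // planes, and a 641x481 image has 321x241 chroma planes.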
  base::CheckedNumeric<int> safe_uv_width(y_plane_size.width());
  base::CheckedNumeric<int> safe_uv_height(y_plane_size.height());
  safe_uv_width += 1;
  safe_uv_width /= 2;
  safe_uv_height += 1;
  safe_uv_height /= 2;
  int uv_width;
  int uv_height;
  if (!safe_uv_width.AssignIfValid(&uv_width) ||
      !safe_uv_height.AssignIfValid(&uv_height)) {
    DLOG(ERROR) << "Could not calculate subsampled dimensions";
    return;
  }
  gfx::Size uv_plane_size = gfx::Size(uv_width, uv_height);

  // We should notify the SharedContextState that we or Skia may have modified
  // the driver's GL state. We should also notify Skia that we may have
  // modified the graphics API state outside of Skia. We put this in a
  // ScopedClosureRunner so that if we return early, both the
  // SharedContextState and Skia end up in a consistent state.
  notify_gl_state_changed.emplace(base::BindOnce(
      [](scoped_refptr<SharedContextState> scs) {
        scs->set_need_context_state_reset(true);
        scs->PessimisticallyResetGrContext();
      },
      shared_context_state));

  // Create a gl::GLImage for each plane and attach it to a texture.
  const size_t num_planes =
      completed_decode->handle.native_pixmap_handle.planes.size();
  plane_sk_images.resize(num_planes);
  for (size_t plane = 0u; plane < num_planes; plane++) {
    // |resource_cleaner| will be called to delete textures and GLImages that
    // we create in this section in case of an early return.
    CleanUpContext* resource = new CleanUpContext{};
    resource->main_task_runner = channel_->task_runner();
    resource->shared_context_state = shared_context_state.get();
    // The use of base::Unretained() is safe because the |resource| is
    // allocated using new and is deleted inside CleanUpResource().
    base::ScopedClosureRunner resource_cleaner(
        base::BindOnce(&CleanUpResource, base::Unretained(resource)));
    glGenTextures(1u, &resource->texture);
    glBindTexture(GL_TEXTURE_EXTERNAL_OES, resource->texture);
    glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S,
                    GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T,
                    GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
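    // Note: GL_TEXTURE_EXTERNAL_OES textures support only GL_CLAMP_TO_EDGE
    // wrapping and non-mipmapped min/mag filters, hence the explicit
    // parameters above.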
    gfx::Size plane_size = plane == 0 ? y_plane_size : uv_plane_size;

    // Extract the plane out of |completed_decode->handle| and put it in its
    // own gfx::GpuMemoryBufferHandle so that we can create a GL image for the
    // plane.
    gfx::GpuMemoryBufferHandle plane_handle;
    plane_handle.type = completed_decode->handle.type;
    plane_handle.native_pixmap_handle.planes.push_back(
        std::move(completed_decode->handle.native_pixmap_handle.planes[plane]));
    // Note that the buffer format for the plane is R_8 for all planes if the
    // result of the decode is in YV12. For NV12, the first plane (luma) is
    // R_8 and the second plane (chroma) is RG_88.
    const bool is_nv12_chroma_plane = completed_decode->buffer_format ==
                                          gfx::BufferFormat::YUV_420_BIPLANAR &&
                                      plane == 1u;
    const auto plane_format = is_nv12_chroma_plane ? gfx::BufferFormat::RG_88
                                                   : gfx::BufferFormat::R_8;
    scoped_refptr<gl::GLImage> plane_image;
    if (external_image_factory_for_testing_) {
      plane_image =
          external_image_factory_for_testing_->CreateImageForGpuMemoryBuffer(
              std::move(plane_handle), plane_size, plane_format,
              gfx::ColorSpace(), gfx::BufferPlane::DEFAULT, -1 /* client_id */,
              kNullSurfaceHandle);
    } else {
      auto plane_pixmap = base::MakeRefCounted<gfx::NativePixmapDmaBuf>(
          plane_size, plane_format,
          std::move(plane_handle.native_pixmap_handle));
      auto plane_image_native_pixmap =
          base::MakeRefCounted<gl::GLImageNativePixmap>(plane_size,
                                                        plane_format);
      if (plane_image_native_pixmap->Initialize(plane_pixmap))
        plane_image = std::move(plane_image_native_pixmap);
    }
    if (!plane_image) {
      DLOG(ERROR) << "Could not create GL image";
      return;
    }
    resource->gl_image = std::move(plane_image);
    if (!resource->gl_image->BindTexImage(GL_TEXTURE_EXTERNAL_OES)) {
      DLOG(ERROR) << "Could not bind GL image to texture";
      return;
    }

    // Notify Skia that we have changed the driver's GL state outside of Skia.
    shared_context_state->PessimisticallyResetGrContext();

    // Create a SkImage using the texture.
    const GrBackendTexture plane_backend_texture(
        plane_size.width(), plane_size.height(), GrMipMapped::kNo,
        GrGLTextureInfo{GL_TEXTURE_EXTERNAL_OES, resource->texture,
                        static_cast<GrGLenum>(
                            is_nv12_chroma_plane ? GL_RG8_EXT : GL_R8_EXT)});
    plane_sk_images[plane] = SkImage::MakeFromTexture(
        shared_context_state->gr_context(), plane_backend_texture,
        kTopLeft_GrSurfaceOrigin,
        is_nv12_chroma_plane ? kR8G8_unorm_SkColorType : kAlpha_8_SkColorType,
        kOpaque_SkAlphaType, nullptr /* colorSpace */, CleanUpResource,
        resource);
    if (!plane_sk_images[plane]) {
      DLOG(ERROR) << "Could not create planar SkImage";
      return;
    }
    // No need for us to call the resource cleaner. Skia should do that.
    resource_cleaner.Release().Reset();
  }

  // Insert the cache entry in the transfer cache. Note that this section
  // validates several of the IPC parameters: |params.raster_decoder_route_id|,
  // |params.transfer_cache_entry_id|, |params.discardable_handle_shm_id|, and
  // |params.discardable_handle_shm_offset|.
  CommandBufferStub* command_buffer =
      channel_->LookupCommandBuffer(params.raster_decoder_route_id);
  if (!command_buffer) {
    DLOG(ERROR) << "Could not find the command buffer";
    return;
  }
  scoped_refptr<Buffer> handle_buffer =
      command_buffer->GetTransferBuffer(params.discardable_handle_shm_id);
  if (!DiscardableHandleBase::ValidateParameters(
          handle_buffer.get(), params.discardable_handle_shm_offset)) {
    DLOG(ERROR) << "Could not validate the discardable handle parameters";
    return;
  }
  DCHECK(command_buffer->decoder_context());
  if (command_buffer->decoder_context()->GetRasterDecoderId() < 0) {
    DLOG(ERROR) << "Could not get the raster decoder ID";
    return;
  }

  {
    auto* gr_shader_cache = channel_->gpu_channel_manager()->gr_shader_cache();
    absl::optional<raster::GrShaderCache::ScopedCacheUse> cache_use;
    if (gr_shader_cache)
      cache_use.emplace(gr_shader_cache,
                        base::strict_cast<int32_t>(channel_->client_id()));
    DCHECK(shared_context_state->transfer_cache());
    SkYUVAInfo::PlaneConfig plane_config =
        completed_decode->buffer_format == gfx::BufferFormat::YVU_420
            ? SkYUVAInfo::PlaneConfig::kY_V_U
            : SkYUVAInfo::PlaneConfig::kY_UV;
    // TODO(andrescj): |params.target_color_space| is not needed because Skia
    // knows where it's drawing, so it can handle color space conversion
    // without us having to specify the target color space. However, we are
    // currently assuming that the color space of the image is sRGB. This
    // means we don't support images with embedded color profiles. We could
    // rename |params.target_color_space| to |params.image_color_space| and we
    // can send the embedded color profile from the renderer using that field.
    if (!shared_context_state->transfer_cache()
             ->CreateLockedHardwareDecodedImageEntry(
                 command_buffer->decoder_context()->GetRasterDecoderId(),
                 params.transfer_cache_entry_id,
                 ServiceDiscardableHandle(std::move(handle_buffer),
                                          params.discardable_handle_shm_offset,
                                          params.discardable_handle_shm_id),
                 shared_context_state->gr_context(), std::move(plane_sk_images),
                 plane_config, SkYUVAInfo::Subsampling::k420,
                 completed_decode->yuv_color_space,
                 completed_decode->buffer_byte_size, params.needs_mips)) {
      DLOG(ERROR) << "Could not create and insert the transfer cache entry";
      return;
    }
  }
  DCHECK(notify_gl_state_changed);
  notify_gl_state_changed->RunAndReset();
#else
  // Right now, we only support Chrome OS because we need to use the
  // |native_pixmap_handle| member of a GpuMemoryBufferHandle.
  NOTIMPLEMENTED()
      << "Image decode acceleration is unsupported for this platform";
#endif
}

void ImageDecodeAcceleratorStub::FinishCompletedDecode(
    uint64_t decode_release_count) {
  DCHECK(main_task_runner_->BelongsToCurrentThread());
  lock_.AssertAcquired();
  sync_point_client_state_->ReleaseFenceSync(decode_release_count);
  if (pending_completed_decodes_.empty())
    channel_->scheduler()->DisableSequence(sequence_);
}
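
// Called by the ImageDecodeAcceleratorWorker when a decode finishes. Unlike
// the methods above, this does not assert a particular thread; all shared
// state is protected by |lock_|.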
void ImageDecodeAcceleratorStub::OnDecodeCompleted(
    gfx::Size expected_output_size,
    std::unique_ptr<ImageDecodeAcceleratorWorker::DecodeResult> result) {
  base::AutoLock lock(lock_);
  if (!channel_) {
    // The channel is no longer available, so don't do anything.
    return;
  }

  // A sanity check on the output of the decoder.
  DCHECK(!result || expected_output_size == result->visible_size);

  // The decode is ready to be processed: add it to
  // |pending_completed_decodes_| so that ProcessCompletedDecode() can pick it
  // up.
  pending_completed_decodes_.push(std::move(result));

  // We only need to enable the sequence when the number of pending completed
  // decodes is 1. If there are more, the sequence should already be enabled.
  if (pending_completed_decodes_.size() == 1u)
    channel_->scheduler()->EnableSequence(sequence_);
}

}  // namespace gpu