vaapi_video_decode_accelerator.cc

  1. // Copyright (c) 2012 The Chromium Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style license that can be
  3. // found in the LICENSE file.
  4. #include "media/gpu/vaapi/vaapi_video_decode_accelerator.h"
  5. #include <string.h>
  6. #include <va/va.h>
  7. #include <memory>
  8. #include "base/bind.h"
  9. #include "base/callback_helpers.h"
  10. #include "base/containers/contains.h"
  11. #include "base/containers/cxx20_erase.h"
  12. #include "base/cpu.h"
  13. #include "base/files/scoped_file.h"
  14. #include "base/json/json_writer.h"
  15. #include "base/logging.h"
  16. #include "base/metrics/histogram_macros.h"
  17. #include "base/numerics/safe_conversions.h"
  18. #include "base/strings/string_util.h"
  19. #include "base/strings/stringprintf.h"
  20. #include "base/synchronization/waitable_event.h"
  21. #include "base/threading/thread_task_runner_handle.h"
  22. #include "base/trace_event/memory_dump_manager.h"
  23. #include "base/trace_event/process_memory_dump.h"
  24. #include "base/trace_event/trace_event.h"
  25. #include "build/build_config.h"
  26. #include "gpu/ipc/service/gpu_channel.h"
  27. #include "media/base/bind_to_current_loop.h"
  28. #include "media/base/format_utils.h"
  29. #include "media/base/media_log.h"
  30. #include "media/base/video_util.h"
  31. #include "media/gpu/accelerated_video_decoder.h"
  32. #include "media/gpu/h264_decoder.h"
  33. #include "media/gpu/macros.h"
  34. #include "media/gpu/vaapi/h264_vaapi_video_decoder_delegate.h"
  35. #include "media/gpu/vaapi/vaapi_common.h"
  36. #include "media/gpu/vaapi/vaapi_picture.h"
  37. #include "media/gpu/vaapi/vaapi_utils.h"
  38. #include "media/gpu/vaapi/vp8_vaapi_video_decoder_delegate.h"
  39. #include "media/gpu/vaapi/vp9_vaapi_video_decoder_delegate.h"
  40. #include "media/gpu/vp8_decoder.h"
  41. #include "media/gpu/vp9_decoder.h"
  42. #include "media/video/picture.h"
  43. namespace media {
  44. namespace {
  45. // Returns the preferred VA_RT_FORMAT for the given |profile|.
  46. unsigned int GetVaFormatForVideoCodecProfile(VideoCodecProfile profile) {
  47. if (profile == VP9PROFILE_PROFILE2 || profile == VP9PROFILE_PROFILE3)
  48. return VA_RT_FORMAT_YUV420_10BPP;
  49. return VA_RT_FORMAT_YUV420;
  50. }
51. // Returns true if the CPU is an Intel Gemini Lake or later (including Kaby
52. // Lake). CPU platform IDs are referenced from the kernel source file
53. // arch/x86/include/asm/intel-family.h.
  54. bool IsGeminiLakeOrLater() {
  55. constexpr int kPentiumAndLaterFamily = 0x06;
  56. constexpr int kGeminiLakeModelId = 0x7A;
  57. static base::CPU cpuid;
  58. static bool is_geminilake_or_later =
  59. cpuid.family() == kPentiumAndLaterFamily &&
  60. cpuid.model() >= kGeminiLakeModelId;
  61. return is_geminilake_or_later;
  62. }
  63. } // namespace
  64. #define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
  65. do { \
  66. if (!(result)) { \
  67. LOG(ERROR) << log; \
  68. NotifyError(error_code); \
  69. return ret; \
  70. } \
  71. } while (0)
  72. #define RETURN_AND_NOTIFY_ON_STATUS(status, ret) \
  73. do { \
  74. if (!status.is_ok()) { \
  75. NotifyStatus(status); \
  76. return ret; \
  77. } \
  78. } while (0)
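// Both macros take the value to return from the enclosing method as their
// last argument; void methods pass an empty |ret|, which is why calls in this
// file look like, e.g.:
//   RETURN_AND_NOTIFY_ON_FAILURE(ok, "Something failed", PLATFORM_FAILURE, );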
  79. class VaapiVideoDecodeAccelerator::InputBuffer {
  80. public:
  81. InputBuffer() : buffer_(nullptr) {}
  82. InputBuffer(int32_t id,
  83. scoped_refptr<DecoderBuffer> buffer,
  84. base::OnceCallback<void(int32_t id)> release_cb)
  85. : id_(id),
  86. buffer_(std::move(buffer)),
  87. release_cb_(std::move(release_cb)) {}
  88. InputBuffer(const InputBuffer&) = delete;
  89. InputBuffer& operator=(const InputBuffer&) = delete;
  90. ~InputBuffer() {
  91. DVLOGF(4) << "id = " << id_;
  92. if (release_cb_)
  93. std::move(release_cb_).Run(id_);
  94. }
95. // Indicates this is a dummy buffer for a flush request.
  96. bool IsFlushRequest() const { return !buffer_; }
  97. int32_t id() const { return id_; }
  98. const scoped_refptr<DecoderBuffer>& buffer() const { return buffer_; }
  99. private:
  100. const int32_t id_ = -1;
  101. const scoped_refptr<DecoderBuffer> buffer_;
  102. base::OnceCallback<void(int32_t id)> release_cb_;
  103. };
  104. void VaapiVideoDecodeAccelerator::NotifyStatus(VaapiStatus status) {
  105. DCHECK(!status.is_ok());
106. // Notify the client of a platform-level error.
  107. NotifyError(PLATFORM_FAILURE);
  108. // TODO(crbug.com/1103510) there is no MediaLog here, we should change that.
  109. std::string output_str;
  110. base::JSONWriter::Write(MediaSerialize(status), &output_str);
  111. DLOG(ERROR) << output_str;
  112. }
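// NotifyError() may be invoked from |decoder_thread_task_runner_|; in that
// case it re-posts itself to |task_runner_| so that |client_| is only ever
// called on the thread this object was created on.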
  113. void VaapiVideoDecodeAccelerator::NotifyError(Error error) {
  114. if (!task_runner_->BelongsToCurrentThread()) {
  115. DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  116. task_runner_->PostTask(
  117. FROM_HERE, base::BindOnce(&VaapiVideoDecodeAccelerator::NotifyError,
  118. weak_this_, error));
  119. return;
  120. }
  121. VLOGF(1) << "Notifying of error " << error;
  122. if (client_) {
  123. client_->NotifyError(error);
  124. client_ptr_factory_.reset();
  125. }
  126. }
  127. VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
  128. const MakeGLContextCurrentCallback& make_context_current_cb,
  129. const BindGLImageCallback& bind_image_cb)
  130. : state_(kUninitialized),
  131. input_ready_(&lock_),
  132. buffer_allocation_mode_(BufferAllocationMode::kNormal),
  133. surfaces_available_(&lock_),
  134. va_surface_format_(VA_INVALID_ID),
  135. task_runner_(base::ThreadTaskRunnerHandle::Get()),
  136. decoder_thread_("VaapiDecoderThread"),
  137. finish_flush_pending_(false),
  138. awaiting_va_surfaces_recycle_(false),
  139. requested_num_pics_(0),
  140. requested_num_reference_frames_(0),
  141. previously_requested_num_reference_frames_(0),
  142. profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
  143. make_context_current_cb_(make_context_current_cb),
  144. bind_image_cb_(bind_image_cb),
  145. weak_this_factory_(this) {
  146. weak_this_ = weak_this_factory_.GetWeakPtr();
  147. va_surface_recycle_cb_ = BindToCurrentLoop(base::BindRepeating(
  148. &VaapiVideoDecodeAccelerator::RecycleVASurface, weak_this_));
  149. base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
  150. this, "media::VaapiVideoDecodeAccelerator",
  151. base::ThreadTaskRunnerHandle::Get());
  152. }
  153. VaapiVideoDecodeAccelerator::~VaapiVideoDecodeAccelerator() {
  154. DCHECK(task_runner_->BelongsToCurrentThread());
  155. base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
  156. this);
  157. }
  158. bool VaapiVideoDecodeAccelerator::Initialize(const Config& config,
  159. Client* client) {
  160. DCHECK(task_runner_->BelongsToCurrentThread());
  161. vaapi_picture_factory_ = std::make_unique<VaapiPictureFactory>();
  162. if (config.is_encrypted()) {
  163. NOTREACHED() << "Encrypted streams are not supported for this VDA";
  164. return false;
  165. }
  166. client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
  167. client_ = client_ptr_factory_->GetWeakPtr();
  168. VideoCodecProfile profile = config.profile;
  169. base::AutoLock auto_lock(lock_);
  170. DCHECK_EQ(state_, kUninitialized);
  171. VLOGF(2) << "Initializing VAVDA, profile: " << GetProfileName(profile);
  172. vaapi_wrapper_ = VaapiWrapper::CreateForVideoCodec(
  173. VaapiWrapper::kDecode, profile, EncryptionScheme::kUnencrypted,
  174. base::BindRepeating(&ReportVaapiErrorToUMA,
  175. "Media.VaapiVideoDecodeAccelerator.VAAPIError"),
  176. /*enforce_sequence_affinity=*/false);
  177. UMA_HISTOGRAM_BOOLEAN("Media.VAVDA.VaapiWrapperCreationSuccess",
  178. vaapi_wrapper_.get());
  179. if (!vaapi_wrapper_.get()) {
  180. VLOGF(1) << "Failed initializing VAAPI for profile "
  181. << GetProfileName(profile);
  182. return false;
  183. }
  184. if (profile >= H264PROFILE_MIN && profile <= H264PROFILE_MAX) {
  185. auto accelerator =
  186. std::make_unique<H264VaapiVideoDecoderDelegate>(this, vaapi_wrapper_);
  187. decoder_delegate_ = accelerator.get();
  188. decoder_.reset(new H264Decoder(std::move(accelerator), profile,
  189. config.container_color_space));
  190. } else if (profile >= VP8PROFILE_MIN && profile <= VP8PROFILE_MAX) {
  191. auto accelerator =
  192. std::make_unique<VP8VaapiVideoDecoderDelegate>(this, vaapi_wrapper_);
  193. decoder_delegate_ = accelerator.get();
  194. decoder_.reset(new VP8Decoder(std::move(accelerator)));
  195. } else if (profile >= VP9PROFILE_MIN && profile <= VP9PROFILE_MAX) {
  196. auto accelerator =
  197. std::make_unique<VP9VaapiVideoDecoderDelegate>(this, vaapi_wrapper_);
  198. decoder_delegate_ = accelerator.get();
  199. decoder_.reset(new VP9Decoder(std::move(accelerator), profile,
  200. config.container_color_space));
  201. } else {
  202. VLOGF(1) << "Unsupported profile " << GetProfileName(profile);
  203. return false;
  204. }
  205. CHECK(decoder_thread_.Start());
  206. decoder_thread_task_runner_ = decoder_thread_.task_runner();
  207. state_ = kIdle;
  208. profile_ = profile;
  209. output_mode_ = config.output_mode;
  210. buffer_allocation_mode_ = DecideBufferAllocationMode();
  211. previously_requested_num_reference_frames_ = 0;
  212. return true;
  213. }
  214. void VaapiVideoDecodeAccelerator::OutputPicture(
  215. scoped_refptr<VASurface> va_surface,
  216. int32_t input_id,
  217. gfx::Rect visible_rect,
  218. const VideoColorSpace& picture_color_space) {
  219. DCHECK(task_runner_->BelongsToCurrentThread());
  220. const VASurfaceID va_surface_id = va_surface->id();
  221. VaapiPicture* picture = nullptr;
  222. {
  223. base::AutoLock auto_lock(lock_);
  224. int32_t picture_buffer_id = available_picture_buffers_.front();
  225. if (buffer_allocation_mode_ == BufferAllocationMode::kNone) {
  226. // Find the |pictures_| entry matching |va_surface_id|.
  227. for (const auto& id_and_picture : pictures_) {
  228. if (id_and_picture.second->va_surface_id() == va_surface_id) {
  229. picture_buffer_id = id_and_picture.first;
  230. break;
  231. }
  232. }
  233. }
  234. picture = pictures_[picture_buffer_id].get();
  235. DCHECK(base::Contains(available_picture_buffers_, picture_buffer_id));
  236. base::Erase(available_picture_buffers_, picture_buffer_id);
  237. }
  238. DCHECK(picture) << " could not find " << va_surface_id << " available";
  239. const int32_t output_id = picture->picture_buffer_id();
  240. DVLOGF(4) << "Outputting VASurface " << va_surface->id()
  241. << " into pixmap bound to picture buffer id " << output_id;
  242. if (buffer_allocation_mode_ != BufferAllocationMode::kNone) {
  243. TRACE_EVENT2("media,gpu", "VAVDA::DownloadFromSurface", "input_id",
  244. input_id, "output_id", output_id);
  245. RETURN_AND_NOTIFY_ON_FAILURE(picture->DownloadFromSurface(va_surface),
  246. "Failed putting surface into pixmap",
  247. PLATFORM_FAILURE, );
  248. }
  249. {
  250. base::AutoLock auto_lock(lock_);
  251. TRACE_COUNTER_ID2("media,gpu", "Vaapi frames at client", this, "used",
  252. pictures_.size() - available_picture_buffers_.size(),
  253. "available", available_picture_buffers_.size());
  254. }
  255. DVLOGF(4) << "Notifying output picture id " << output_id << " for input "
  256. << input_id
  257. << " is ready. visible rect: " << visible_rect.ToString();
  258. if (!client_)
  259. return;
  260. Picture client_picture(output_id, input_id, visible_rect,
  261. picture_color_space.ToGfxColorSpace(),
  262. picture->AllowOverlay());
  263. client_picture.set_read_lock_fences_enabled(true);
  264. // Notify the |client_| a picture is ready to be consumed.
  265. client_->PictureReady(client_picture);
  266. }
  267. void VaapiVideoDecodeAccelerator::TryOutputPicture() {
  268. DCHECK(task_runner_->BelongsToCurrentThread());
  269. // Handle Destroy() arriving while pictures are queued for output.
  270. if (!client_)
  271. return;
  272. {
  273. base::AutoLock auto_lock(lock_);
  274. if (pending_output_cbs_.empty() || available_picture_buffers_.empty())
  275. return;
  276. }
  277. auto output_cb = std::move(pending_output_cbs_.front());
  278. pending_output_cbs_.pop();
  279. std::move(output_cb).Run();
  280. if (finish_flush_pending_ && pending_output_cbs_.empty())
  281. FinishFlush();
  282. }
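// QueueInputBuffer() wraps the incoming DecoderBuffer in an InputBuffer (or a
// default-constructed, buffer-less InputBuffer for an end-of-stream flush),
// pushes it onto |input_buffers_| under |lock_| and signals |input_ready_| so
// that a DecodeTask() blocked in GetCurrInputBuffer_Locked() can make
// progress.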
  283. void VaapiVideoDecodeAccelerator::QueueInputBuffer(
  284. scoped_refptr<DecoderBuffer> buffer,
  285. int32_t bitstream_id) {
  286. DVLOGF(4) << "Queueing new input buffer id: " << bitstream_id
  287. << " size: " << (buffer->end_of_stream() ? 0 : buffer->data_size());
  288. DCHECK(task_runner_->BelongsToCurrentThread());
  289. TRACE_EVENT1("media,gpu", "QueueInputBuffer", "input_id", bitstream_id);
  290. base::AutoLock auto_lock(lock_);
  291. if (buffer->end_of_stream()) {
  292. auto flush_buffer = std::make_unique<InputBuffer>();
  293. DCHECK(flush_buffer->IsFlushRequest());
  294. input_buffers_.push(std::move(flush_buffer));
  295. } else {
  296. auto input_buffer = std::make_unique<InputBuffer>(
  297. bitstream_id, std::move(buffer),
  298. BindToCurrentLoop(
  299. base::BindOnce(&Client::NotifyEndOfBitstreamBuffer, client_)));
  300. input_buffers_.push(std::move(input_buffer));
  301. }
  302. TRACE_COUNTER1("media,gpu", "Vaapi input buffers", input_buffers_.size());
  303. input_ready_.Signal();
  304. switch (state_) {
  305. case kIdle:
  306. state_ = kDecoding;
  307. decoder_thread_task_runner_->PostTask(
  308. FROM_HERE, base::BindOnce(&VaapiVideoDecodeAccelerator::DecodeTask,
  309. base::Unretained(this)));
  310. break;
  311. case kDecoding:
  312. // Decoder already running.
  313. break;
  314. case kResetting:
  315. // When resetting, allow accumulating bitstream buffers, so that
  316. // the client can queue after-seek-buffers while we are finishing with
  317. // the before-seek one.
  318. break;
  319. default:
  320. LOG(ERROR) << "Decode/Flush request from client in invalid state: "
  321. << state_;
  322. NotifyError(PLATFORM_FAILURE);
  323. break;
  324. }
  325. }
  326. bool VaapiVideoDecodeAccelerator::GetCurrInputBuffer_Locked() {
  327. DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  328. lock_.AssertAcquired();
  329. if (curr_input_buffer_.get())
  330. return true;
331. // Will only wait if, in the current state, new buffers are expected to be
332. // queued by the client via Decode(). The state can change during the wait.
  333. while (input_buffers_.empty() && (state_ == kDecoding || state_ == kIdle))
  334. input_ready_.Wait();
335. // We could have been woken up in a different state, or we might never have
336. // gone to sleep, due to the current state.
  337. if (state_ != kDecoding && state_ != kIdle)
  338. return false;
  339. DCHECK(!input_buffers_.empty());
  340. curr_input_buffer_ = std::move(input_buffers_.front());
  341. input_buffers_.pop();
  342. TRACE_COUNTER1("media,gpu", "Vaapi input buffers", input_buffers_.size());
  343. if (curr_input_buffer_->IsFlushRequest()) {
  344. DVLOGF(4) << "New flush buffer";
  345. return true;
  346. }
  347. DVLOGF(4) << "New |curr_input_buffer_|, id: " << curr_input_buffer_->id()
  348. << " size: " << curr_input_buffer_->buffer()->data_size() << "B";
  349. decoder_->SetStream(curr_input_buffer_->id(), *curr_input_buffer_->buffer());
  350. return true;
  351. }
  352. void VaapiVideoDecodeAccelerator::ReturnCurrInputBuffer_Locked() {
  353. DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  354. lock_.AssertAcquired();
  355. DCHECK(curr_input_buffer_.get());
  356. curr_input_buffer_.reset();
  357. }
  358. // TODO(posciak): refactor the whole class to remove sleeping in wait for
  359. // surfaces, and reschedule DecodeTask instead.
  360. bool VaapiVideoDecodeAccelerator::WaitForSurfaces_Locked() {
  361. DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  362. lock_.AssertAcquired();
  363. while (available_va_surfaces_.empty() &&
  364. (state_ == kDecoding || state_ == kIdle)) {
  365. surfaces_available_.Wait();
  366. }
  367. return state_ == kDecoding || state_ == kIdle;
  368. }
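// DecodeTask() runs on |decoder_thread_| and loops pulling input buffers and
// calling AcceleratedVideoDecoder::Decode() with |lock_| released around the
// call. A kConfigChange result posts InitiateSurfaceSetChange() to
// |task_runner_| and returns; decoding resumes once AssignPictureBuffers()
// reposts a DecodeTask().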
  369. void VaapiVideoDecodeAccelerator::DecodeTask() {
  370. DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  371. base::AutoLock auto_lock(lock_);
  372. if (state_ != kDecoding)
  373. return;
  374. DVLOGF(4) << "Decode task";
  375. // Try to decode what stream data is (still) in the decoder until we run out
  376. // of it.
  377. while (GetCurrInputBuffer_Locked()) {
  378. DCHECK(curr_input_buffer_.get());
  379. if (curr_input_buffer_->IsFlushRequest()) {
  380. FlushTask();
  381. break;
  382. }
  383. AcceleratedVideoDecoder::DecodeResult res;
  384. {
  385. // We are OK releasing the lock here, as decoder never calls our methods
  386. // directly and we will reacquire the lock before looking at state again.
  387. // This is the main decode function of the decoder and while keeping
  388. // the lock for its duration would be fine, it would defeat the purpose
  389. // of having a separate decoder thread.
  390. base::AutoUnlock auto_unlock(lock_);
  391. TRACE_EVENT0("media,gpu", "VAVDA::Decode");
  392. res = decoder_->Decode();
  393. }
  394. switch (res) {
  395. case AcceleratedVideoDecoder::kConfigChange: {
  396. const uint8_t bit_depth = decoder_->GetBitDepth();
  397. RETURN_AND_NOTIFY_ON_FAILURE(
  398. bit_depth == 8u,
  399. "Unsupported bit depth: " << base::strict_cast<int>(bit_depth),
  400. PLATFORM_FAILURE, );
  401. // The visible rect should be a subset of the picture size. Otherwise,
  402. // the encoded stream is bad.
  403. const gfx::Size pic_size = decoder_->GetPicSize();
  404. const gfx::Rect visible_rect = decoder_->GetVisibleRect();
  405. RETURN_AND_NOTIFY_ON_FAILURE(
  406. gfx::Rect(pic_size).Contains(visible_rect),
  407. "The visible rectangle is not contained by the picture size",
  408. UNREADABLE_INPUT, );
  409. VLOGF(2) << "Decoder requesting a new set of surfaces";
  410. size_t required_num_of_pictures = decoder_->GetRequiredNumOfPictures();
  411. if (buffer_allocation_mode_ == BufferAllocationMode::kNone &&
  412. profile_ >= H264PROFILE_MIN && profile_ <= H264PROFILE_MAX) {
  413. // For H.264, the decoder might request too few pictures. In
  414. // BufferAllocationMode::kNone, this can cause us to do a lot of busy
  415. // work waiting for picture buffers to come back from the client (see
  416. // crbug.com/910986#c32). This is a workaround to increase the
  417. // likelihood that we don't have to wait on buffers to come back from
418. // the client. |kMinNumOfPics| is picked to mirror the value returned by
  419. // VP9Decoder::GetRequiredNumOfPictures().
  420. constexpr size_t kMinNumOfPics = 13u;
  421. required_num_of_pictures =
  422. std::max(kMinNumOfPics, required_num_of_pictures);
  423. }
  424. // Notify |decoder_delegate_| of an imminent VAContextID destruction, so
  425. // it can destroy any internal structures making use of it.
  426. decoder_delegate_->OnVAContextDestructionSoon();
  427. task_runner_->PostTask(
  428. FROM_HERE,
  429. base::BindOnce(
  430. &VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange,
  431. weak_this_, required_num_of_pictures, pic_size,
  432. decoder_->GetNumReferenceFrames(), visible_rect));
  433. // We'll get rescheduled once ProvidePictureBuffers() finishes.
  434. return;
  435. }
  436. case AcceleratedVideoDecoder::kRanOutOfStreamData:
  437. ReturnCurrInputBuffer_Locked();
  438. break;
  439. case AcceleratedVideoDecoder::kRanOutOfSurfaces:
  440. // No more output buffers in the decoder, try getting more or go to
  441. // sleep waiting for them.
  442. if (!WaitForSurfaces_Locked())
  443. return;
  444. break;
  445. case AcceleratedVideoDecoder::kNeedContextUpdate:
  446. // This should not happen as we return false from
  447. // NeedsCompressedHeaderParsed().
  448. NOTREACHED() << "Context updates not supported";
  449. return;
  450. case AcceleratedVideoDecoder::kDecodeError:
  451. RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream",
  452. PLATFORM_FAILURE, );
  453. return;
  454. case AcceleratedVideoDecoder::kTryAgain:
  455. NOTREACHED() << "Should not reach here unless this class accepts "
  456. "encrypted streams.";
  457. RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream",
  458. PLATFORM_FAILURE, );
  459. return;
  460. }
  461. }
  462. }
  463. void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(
  464. size_t num_pics,
  465. gfx::Size size,
  466. size_t num_reference_frames,
  467. const gfx::Rect& visible_rect) {
  468. DCHECK(task_runner_->BelongsToCurrentThread());
  469. DCHECK(!awaiting_va_surfaces_recycle_);
  470. DCHECK_GT(num_pics, num_reference_frames);
471. // At this point the decoder has stopped running and has already posted onto
472. // our loop any remaining output request callbacks, which executed before we
473. // got here. Some of them might still be pending, because we might not have
  474. // had enough PictureBuffers to output surfaces to. Initiate a wait cycle,
  475. // which will wait for client to return enough PictureBuffers to us, so that
  476. // we can finish all pending output callbacks, releasing associated surfaces.
  477. awaiting_va_surfaces_recycle_ = true;
  478. requested_pic_size_ = size;
  479. requested_visible_rect_ = visible_rect;
  480. if (buffer_allocation_mode_ == BufferAllocationMode::kSuperReduced) {
  481. // Add one to the reference frames for the one being currently egressed.
  482. requested_num_reference_frames_ = num_reference_frames + 1;
  483. requested_num_pics_ = num_pics - num_reference_frames;
  484. } else if (buffer_allocation_mode_ == BufferAllocationMode::kReduced) {
  485. // Add one to the reference frames for the one being currently egressed,
  486. // and an extra allocation for both |client_| and |decoder_|.
  487. requested_num_reference_frames_ = num_reference_frames + 2;
  488. requested_num_pics_ = num_pics - num_reference_frames + 1;
  489. } else {
  490. requested_num_reference_frames_ = 0;
  491. requested_num_pics_ = num_pics + num_extra_pics_;
  492. }
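// Worked example (illustrative numbers only): if the decoder reports
// |num_pics| = 13 and |num_reference_frames| = 8, kSuperReduced requests
// 8 + 1 = 9 internal VA surfaces and 13 - 8 = 5 client PictureBuffers, while
// kReduced requests 8 + 2 = 10 surfaces and 13 - 8 + 1 = 6 PictureBuffers.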
  493. VLOGF(2) << " |requested_num_pics_| = " << requested_num_pics_
  494. << "; |requested_num_reference_frames_| = "
  495. << requested_num_reference_frames_;
  496. TryFinishSurfaceSetChange();
  497. }
  498. void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() {
  499. DCHECK(task_runner_->BelongsToCurrentThread());
  500. if (!awaiting_va_surfaces_recycle_)
  501. return;
  502. base::AutoLock auto_lock(lock_);
  503. const size_t expected_max_available_va_surfaces =
  504. IsBufferAllocationModeReducedOrSuperReduced()
  505. ? previously_requested_num_reference_frames_
  506. : pictures_.size();
  507. if (!pending_output_cbs_.empty() ||
  508. expected_max_available_va_surfaces != available_va_surfaces_.size()) {
  509. // If we're here the stream resolution has changed; we need to wait until:
  510. // - all |pending_output_cbs_| have been executed
  511. // - all VASurfaces are back to |available_va_surfaces_|; we can't use
  512. // |requested_num_reference_frames_| for comparison, since it might have
  513. // changed in the previous call to InitiateSurfaceSetChange(), so we use
  514. // |previously_requested_num_reference_frames_| instead.
  515. DVLOGF(2) << "Awaiting pending output/surface release callbacks to finish";
  516. task_runner_->PostTask(
  517. FROM_HERE,
  518. base::BindOnce(&VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange,
  519. weak_this_));
  520. return;
  521. }
  522. previously_requested_num_reference_frames_ = requested_num_reference_frames_;
  523. // All surfaces released, destroy them and dismiss all PictureBuffers.
  524. awaiting_va_surfaces_recycle_ = false;
  525. const VideoCodecProfile new_profile = decoder_->GetProfile();
  526. if (profile_ != new_profile) {
  527. profile_ = new_profile;
  528. auto new_vaapi_wrapper = VaapiWrapper::CreateForVideoCodec(
  529. VaapiWrapper::kDecode, profile_, EncryptionScheme::kUnencrypted,
  530. base::BindRepeating(&ReportVaapiErrorToUMA,
  531. "Media.VaapiVideoDecodeAccelerator.VAAPIError"),
  532. /*enforce_sequence_affinity=*/false);
  533. RETURN_AND_NOTIFY_ON_FAILURE(new_vaapi_wrapper.get(),
  534. "Failed creating VaapiWrapper",
  535. INVALID_ARGUMENT, );
  536. decoder_delegate_->set_vaapi_wrapper(new_vaapi_wrapper.get());
  537. vaapi_wrapper_ = std::move(new_vaapi_wrapper);
  538. } else {
  539. vaapi_wrapper_->DestroyContext();
  540. }
  541. available_va_surfaces_.clear();
  542. for (auto iter = pictures_.begin(); iter != pictures_.end(); ++iter) {
  543. VLOGF(2) << "Dismissing picture id: " << iter->first;
  544. if (client_)
  545. client_->DismissPictureBuffer(iter->first);
  546. }
  547. pictures_.clear();
  548. // And ask for a new set as requested.
  549. VLOGF(2) << "Requesting " << requested_num_pics_
  550. << " pictures of size: " << requested_pic_size_.ToString()
  551. << " and visible rectangle = " << requested_visible_rect_.ToString();
  552. const absl::optional<VideoPixelFormat> format =
  553. GfxBufferFormatToVideoPixelFormat(
  554. vaapi_picture_factory_->GetBufferFormat());
  555. CHECK(format);
  556. task_runner_->PostTask(
  557. FROM_HERE, base::BindOnce(&Client::ProvidePictureBuffersWithVisibleRect,
  558. client_, requested_num_pics_, *format, 1,
  559. requested_pic_size_, requested_visible_rect_,
  560. vaapi_picture_factory_->GetGLTextureTarget()));
  561. // |client_| may respond via AssignPictureBuffers().
  562. }
  563. void VaapiVideoDecodeAccelerator::Decode(BitstreamBuffer bitstream_buffer) {
  564. Decode(bitstream_buffer.ToDecoderBuffer(), bitstream_buffer.id());
  565. }
  566. void VaapiVideoDecodeAccelerator::Decode(scoped_refptr<DecoderBuffer> buffer,
  567. int32_t bitstream_id) {
  568. DCHECK(task_runner_->BelongsToCurrentThread());
  569. TRACE_EVENT1("media,gpu", "VAVDA::Decode", "Buffer id", bitstream_id);
  570. if (bitstream_id < 0) {
  571. LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_id;
  572. NotifyError(INVALID_ARGUMENT);
  573. return;
  574. }
  575. if (!buffer) {
  576. if (client_)
  577. client_->NotifyEndOfBitstreamBuffer(bitstream_id);
  578. return;
  579. }
  580. QueueInputBuffer(std::move(buffer), bitstream_id);
  581. }
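// AssignPictureBuffers() creates one VaapiPicture per PictureBuffer handed in
// by the client (plus a |vpp_vaapi_wrapper_| if the picture implementation
// needs the video processing pipeline and we are not in
// BufferAllocationMode::kNone), and then either creates a plain VA context and
// decodes onto the pictures' own VA surfaces (kNone) or creates a context
// together with a separate pool of internal decode surfaces.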
  582. void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
  583. const std::vector<PictureBuffer>& buffers) {
  584. DCHECK(task_runner_->BelongsToCurrentThread());
  585. base::AutoLock auto_lock(lock_);
  586. DCHECK(pictures_.empty());
  587. available_picture_buffers_.clear();
  588. RETURN_AND_NOTIFY_ON_FAILURE(
  589. buffers.size() >= requested_num_pics_,
  590. "Got an invalid number of picture buffers. (Got " << buffers.size()
  591. << ", requested " << requested_num_pics_ << ")", INVALID_ARGUMENT, );
592. // |requested_pic_size_| can be adjusted by the VDA client, so we should
593. // update it to buffers[0].size(). However, the AMD driver doesn't decode
594. // frames correctly if the surface stride differs from the width of the
595. // coded size.
  596. // TODO(b/139460315): Save buffers[0].size() as |adjusted_size_| once the
  597. // AMD driver issue is resolved.
  598. va_surface_format_ = GetVaFormatForVideoCodecProfile(profile_);
  599. std::vector<VASurfaceID> va_surface_ids;
  600. scoped_refptr<VaapiWrapper> vaapi_wrapper_for_picture = vaapi_wrapper_;
  601. const bool requires_vpp =
  602. vaapi_picture_factory_->NeedsProcessingPipelineForDownloading();
  603. // If we aren't in BufferAllocationMode::kNone mode and the VaapiPicture
  604. // implementation we get from |vaapi_picture_factory_| requires the video
  605. // processing pipeline for downloading the decoded frame from the internal
  606. // surface, we need to create a |vpp_vaapi_wrapper_|.
  607. if (requires_vpp && buffer_allocation_mode_ != BufferAllocationMode::kNone) {
  608. if (!vpp_vaapi_wrapper_) {
  609. vpp_vaapi_wrapper_ = VaapiWrapper::Create(
  610. VaapiWrapper::kVideoProcess, VAProfileNone,
  611. EncryptionScheme::kUnencrypted,
  612. base::BindRepeating(
  613. &ReportVaapiErrorToUMA,
  614. "Media.VaapiVideoDecodeAccelerator.Vpp.VAAPIError"),
  615. /*enforce_sequence_affinity=*/false);
  616. RETURN_AND_NOTIFY_ON_FAILURE(vpp_vaapi_wrapper_,
  617. "Failed to initialize VppVaapiWrapper",
  618. PLATFORM_FAILURE, );
  619. // Size is irrelevant for a VPP context.
  620. RETURN_AND_NOTIFY_ON_FAILURE(
  621. vpp_vaapi_wrapper_->CreateContext(gfx::Size()),
  622. "Failed to create Context", PLATFORM_FAILURE, );
  623. }
  624. vaapi_wrapper_for_picture = vpp_vaapi_wrapper_;
  625. }
  626. for (size_t i = 0; i < buffers.size(); ++i) {
  627. // TODO(b/139460315): Create with buffers[i] once the AMD driver issue is
  628. // resolved.
  629. PictureBuffer buffer = buffers[i];
  630. buffer.set_size(requested_pic_size_);
  631. // Note that the |size_to_bind| is not relevant in IMPORT mode.
  632. const gfx::Size size_to_bind =
  633. (output_mode_ == Config::OutputMode::ALLOCATE)
  634. ? GetRectSizeFromOrigin(requested_visible_rect_)
  635. : gfx::Size();
  636. std::unique_ptr<VaapiPicture> picture = vaapi_picture_factory_->Create(
  637. vaapi_wrapper_for_picture, make_context_current_cb_, bind_image_cb_,
  638. buffer, size_to_bind);
  639. RETURN_AND_NOTIFY_ON_FAILURE(picture, "Failed creating a VaapiPicture",
  640. PLATFORM_FAILURE, );
  641. if (output_mode_ == Config::OutputMode::ALLOCATE) {
  642. RETURN_AND_NOTIFY_ON_STATUS(
  643. picture->Allocate(vaapi_picture_factory_->GetBufferFormat()), );
  644. available_picture_buffers_.push_back(buffers[i].id());
  645. VASurfaceID va_surface_id = picture->va_surface_id();
  646. if (va_surface_id != VA_INVALID_ID)
  647. va_surface_ids.push_back(va_surface_id);
  648. }
  649. DCHECK(!base::Contains(pictures_, buffers[i].id()));
  650. pictures_[buffers[i].id()] = std::move(picture);
  651. surfaces_available_.Signal();
  652. }
  653. base::RepeatingCallback<void(VASurfaceID)> va_surface_release_cb;
654. // In BufferAllocationMode::kNone we decode onto the |va_surface_ids| taken
655. // from the client's pictures; otherwise we ask |vaapi_wrapper_| to allocate them.
  656. if (buffer_allocation_mode_ == BufferAllocationMode::kNone) {
  657. DCHECK(!va_surface_ids.empty());
  658. RETURN_AND_NOTIFY_ON_FAILURE(
  659. vaapi_wrapper_->CreateContext(requested_pic_size_),
  660. "Failed creating VA Context", PLATFORM_FAILURE, );
  661. DCHECK_EQ(va_surface_ids.size(), buffers.size());
  662. va_surface_release_cb = base::DoNothing();
  663. } else {
  664. const size_t requested_num_surfaces =
  665. IsBufferAllocationModeReducedOrSuperReduced()
  666. ? requested_num_reference_frames_
  667. : pictures_.size();
  668. CHECK_NE(requested_num_surfaces, 0u);
  669. va_surface_ids.clear();
  670. RETURN_AND_NOTIFY_ON_FAILURE(
  671. vaapi_wrapper_->CreateContextAndSurfaces(
  672. va_surface_format_, requested_pic_size_,
  673. {VaapiWrapper::SurfaceUsageHint::kVideoDecoder},
  674. requested_num_surfaces, &va_surface_ids),
  675. "Failed creating VA Surfaces", PLATFORM_FAILURE, );
  676. va_surface_release_cb =
  677. base::BindRepeating(&VaapiWrapper::DestroySurface, vaapi_wrapper_);
  678. }
  679. for (const VASurfaceID va_surface_id : va_surface_ids) {
  680. available_va_surfaces_.emplace_back(std::make_unique<ScopedVASurfaceID>(
  681. va_surface_id, va_surface_release_cb));
  682. }
  683. // Resume DecodeTask if it is still in decoding state.
  684. if (state_ == kDecoding) {
  685. decoder_thread_task_runner_->PostTask(
  686. FROM_HERE, base::BindOnce(&VaapiVideoDecodeAccelerator::DecodeTask,
  687. base::Unretained(this)));
  688. }
  689. }
  690. #if defined(USE_OZONE)
  691. void VaapiVideoDecodeAccelerator::ImportBufferForPicture(
  692. int32_t picture_buffer_id,
  693. VideoPixelFormat pixel_format,
  694. gfx::GpuMemoryBufferHandle gpu_memory_buffer_handle) {
  695. VLOGF(2) << "Importing picture id: " << picture_buffer_id;
  696. DCHECK(task_runner_->BelongsToCurrentThread());
  697. if (output_mode_ != Config::OutputMode::IMPORT) {
  698. LOG(ERROR) << "Cannot import in non-import mode";
  699. NotifyError(INVALID_ARGUMENT);
  700. return;
  701. }
  702. {
  703. base::AutoLock auto_lock(lock_);
  704. if (!pictures_.count(picture_buffer_id)) {
  705. // It's possible that we've already posted a DismissPictureBuffer for this
  706. // picture, but it has not yet executed when this ImportBufferForPicture
  707. // was posted to us by the client. In that case just ignore this (we've
  708. // already dismissed it and accounted for that).
  709. DVLOGF(3) << "got picture id=" << picture_buffer_id
  710. << " not in use (anymore?).";
  711. return;
  712. }
  713. auto buffer_format = VideoPixelFormatToGfxBufferFormat(pixel_format);
  714. if (!buffer_format) {
  715. LOG(ERROR) << "Unsupported format: " << pixel_format;
  716. NotifyError(INVALID_ARGUMENT);
  717. return;
  718. }
  719. VaapiPicture* picture = pictures_[picture_buffer_id].get();
  720. if (!picture->ImportGpuMemoryBufferHandle(
  721. *buffer_format, std::move(gpu_memory_buffer_handle))) {
  722. // ImportGpuMemoryBufferHandle will close the handles even on failure, so
  723. // we don't need to do this ourselves.
  724. LOG(ERROR) << "Failed to import GpuMemoryBufferHandle";
  725. NotifyError(PLATFORM_FAILURE);
  726. return;
  727. }
  728. }
  729. ReusePictureBuffer(picture_buffer_id);
  730. }
  731. #endif
  732. void VaapiVideoDecodeAccelerator::ReusePictureBuffer(
  733. int32_t picture_buffer_id) {
  734. DVLOGF(4) << "picture id=" << picture_buffer_id;
  735. DCHECK(task_runner_->BelongsToCurrentThread());
  736. TRACE_EVENT1("media,gpu", "VAVDA::ReusePictureBuffer", "Picture id",
  737. picture_buffer_id);
  738. {
  739. base::AutoLock auto_lock(lock_);
  740. if (!pictures_.count(picture_buffer_id)) {
  741. // It's possible that we've already posted a DismissPictureBuffer for this
  742. // picture, but it has not yet executed when this ReusePictureBuffer
  743. // was posted to us by the client. In that case just ignore this (we've
  744. // already dismissed it and accounted for that).
  745. DVLOGF(3) << "got picture id=" << picture_buffer_id
  746. << " not in use (anymore?).";
  747. return;
  748. }
  749. available_picture_buffers_.push_back(picture_buffer_id);
  750. TRACE_COUNTER_ID2("media,gpu", "Vaapi frames at client", this, "used",
  751. pictures_.size() - available_picture_buffers_.size(),
  752. "available", available_picture_buffers_.size());
  753. }
  754. TryOutputPicture();
  755. }
  756. void VaapiVideoDecodeAccelerator::FlushTask() {
  757. VLOGF(2);
  758. DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  759. DCHECK(curr_input_buffer_ && curr_input_buffer_->IsFlushRequest());
  760. curr_input_buffer_.reset();
  761. // First flush all the pictures that haven't been outputted, notifying the
  762. // client to output them.
  763. bool res = decoder_->Flush();
  764. RETURN_AND_NOTIFY_ON_FAILURE(res, "Failed flushing the decoder.",
  765. PLATFORM_FAILURE, );
  766. // Put the decoder in idle state, ready to resume.
  767. decoder_->Reset();
  768. task_runner_->PostTask(
  769. FROM_HERE,
  770. base::BindOnce(&VaapiVideoDecodeAccelerator::FinishFlush, weak_this_));
  771. }
  772. void VaapiVideoDecodeAccelerator::Flush() {
  773. VLOGF(2) << "Got flush request";
  774. DCHECK(task_runner_->BelongsToCurrentThread());
  775. QueueInputBuffer(DecoderBuffer::CreateEOSBuffer(), -1);
  776. }
  777. void VaapiVideoDecodeAccelerator::FinishFlush() {
  778. VLOGF(2);
  779. DCHECK(task_runner_->BelongsToCurrentThread());
  780. finish_flush_pending_ = false;
  781. base::AutoLock auto_lock(lock_);
  782. if (state_ != kDecoding) {
  783. DCHECK(state_ == kDestroying || state_ == kResetting) << state_;
  784. return;
  785. }
  786. // Still waiting for textures from client to finish outputting all pending
  787. // frames. Try again later.
  788. if (!pending_output_cbs_.empty()) {
  789. finish_flush_pending_ = true;
  790. return;
  791. }
  792. // Resume decoding if necessary.
  793. if (input_buffers_.empty()) {
  794. state_ = kIdle;
  795. } else {
  796. decoder_thread_task_runner_->PostTask(
  797. FROM_HERE, base::BindOnce(&VaapiVideoDecodeAccelerator::DecodeTask,
  798. base::Unretained(this)));
  799. }
  800. task_runner_->PostTask(FROM_HERE,
  801. base::BindOnce(&Client::NotifyFlushDone, client_));
  802. }
  803. void VaapiVideoDecodeAccelerator::ResetTask() {
  804. VLOGF(2);
  805. DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  806. // All the decoding tasks from before the reset request from client are done
  807. // by now, as this task was scheduled after them and client is expected not
  808. // to call Decode() after Reset() and before NotifyResetDone.
  809. decoder_->Reset();
  810. base::AutoLock auto_lock(lock_);
  811. // Return current input buffer, if present.
  812. if (curr_input_buffer_)
  813. ReturnCurrInputBuffer_Locked();
  814. // And let client know that we are done with reset.
  815. task_runner_->PostTask(
  816. FROM_HERE,
  817. base::BindOnce(&VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
  818. }
  819. void VaapiVideoDecodeAccelerator::Reset() {
  820. VLOGF(2) << "Got reset request";
  821. DCHECK(task_runner_->BelongsToCurrentThread());
  822. // This will make any new decode tasks exit early.
  823. base::AutoLock auto_lock(lock_);
  824. state_ = kResetting;
  825. finish_flush_pending_ = false;
  826. // Drop all remaining input buffers, if present.
  827. while (!input_buffers_.empty())
  828. input_buffers_.pop();
  829. TRACE_COUNTER1("media,gpu", "Vaapi input buffers", input_buffers_.size());
  830. decoder_thread_task_runner_->PostTask(
  831. FROM_HERE, base::BindOnce(&VaapiVideoDecodeAccelerator::ResetTask,
  832. base::Unretained(this)));
  833. input_ready_.Signal();
  834. surfaces_available_.Signal();
  835. }
  836. void VaapiVideoDecodeAccelerator::FinishReset() {
  837. VLOGF(2);
  838. DCHECK(task_runner_->BelongsToCurrentThread());
  839. base::AutoLock auto_lock(lock_);
  840. if (state_ != kResetting) {
  841. DCHECK(state_ == kDestroying || state_ == kUninitialized) << state_;
  842. return; // We could've gotten destroyed already.
  843. }
  844. // Drop pending outputs.
  845. while (!pending_output_cbs_.empty())
  846. pending_output_cbs_.pop();
  847. if (awaiting_va_surfaces_recycle_) {
  848. // Decoder requested a new surface set while we were waiting for it to
  849. // finish the last DecodeTask, running at the time of Reset().
  850. // Let the surface set change finish first before resetting.
  851. task_runner_->PostTask(
  852. FROM_HERE,
  853. base::BindOnce(&VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
  854. return;
  855. }
  856. state_ = kIdle;
  857. task_runner_->PostTask(FROM_HERE,
  858. base::BindOnce(&Client::NotifyResetDone, client_));
  859. // The client might have given us new buffers via Decode() while we were
  860. // resetting and might be waiting for our move, and not call Decode() anymore
  861. // until we return something. Post a DecodeTask() so that we won't
  862. // sleep forever waiting for Decode() in that case. Having two of them
863. // in the pipe is harmless; the additional one will return as soon as it sees
864. // that we are back in the kDecoding state.
  865. if (!input_buffers_.empty()) {
  866. state_ = kDecoding;
  867. decoder_thread_task_runner_->PostTask(
  868. FROM_HERE, base::BindOnce(&VaapiVideoDecodeAccelerator::DecodeTask,
  869. base::Unretained(this)));
  870. }
  871. }
  872. void VaapiVideoDecodeAccelerator::Cleanup() {
  873. DCHECK(task_runner_->BelongsToCurrentThread());
  874. base::AutoLock auto_lock(lock_);
  875. if (state_ == kUninitialized || state_ == kDestroying)
  876. return;
  877. VLOGF(2) << "Destroying VAVDA";
  878. state_ = kDestroying;
  879. // Call DismissPictureBuffer() to notify |client_| that the picture buffers
  880. // are no longer used and thus |client_| shall release them. If |client_| has
881. // been invalidated in NotifyError(), |client_| will be destroyed shortly. The
  882. // destruction should release all the PictureBuffers.
  883. if (client_) {
  884. for (const auto& id_and_picture : pictures_)
  885. client_->DismissPictureBuffer(id_and_picture.first);
  886. }
  887. pictures_.clear();
  888. client_ptr_factory_.reset();
  889. weak_this_factory_.InvalidateWeakPtrs();
  890. // TODO(mcasas): consider deleting |decoder_| on
  891. // |decoder_thread_task_runner_|, https://crbug.com/789160.
  892. // Signal all potential waiters on the decoder_thread_, let them early-exit,
  893. // as we've just moved to the kDestroying state, and wait for all tasks
  894. // to finish.
  895. input_ready_.Signal();
  896. surfaces_available_.Signal();
  897. {
  898. base::AutoUnlock auto_unlock(lock_);
  899. decoder_thread_.Stop();
  900. }
  901. if (buffer_allocation_mode_ != BufferAllocationMode::kNone)
  902. available_va_surfaces_.clear();
  903. // Notify |decoder_delegate_| of an imminent VAContextID destruction, so it
  904. // can destroy any internal structures making use of it. At this point
  905. // |decoder_thread_| is stopped so we can access |decoder_delegate_| from
  906. // |task_runner_|.
  907. decoder_delegate_->OnVAContextDestructionSoon();
  908. vaapi_wrapper_->DestroyContext();
  909. if (vpp_vaapi_wrapper_)
  910. vpp_vaapi_wrapper_->DestroyContext();
  911. state_ = kUninitialized;
  912. }
  913. void VaapiVideoDecodeAccelerator::Destroy() {
  914. DCHECK(task_runner_->BelongsToCurrentThread());
  915. Cleanup();
  916. delete this;
  917. }
  918. bool VaapiVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
  919. const base::WeakPtr<Client>& decode_client,
  920. const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
  921. return false;
  922. }
  923. void VaapiVideoDecodeAccelerator::SurfaceReady(
  924. scoped_refptr<VASurface> dec_surface,
  925. int32_t bitstream_id,
  926. const gfx::Rect& visible_rect,
  927. const VideoColorSpace& color_space) {
  928. if (!task_runner_->BelongsToCurrentThread()) {
  929. task_runner_->PostTask(
  930. FROM_HERE, base::BindOnce(&VaapiVideoDecodeAccelerator::SurfaceReady,
  931. weak_this_, std::move(dec_surface),
  932. bitstream_id, visible_rect, color_space));
  933. return;
  934. }
  935. DCHECK(!awaiting_va_surfaces_recycle_);
  936. {
  937. base::AutoLock auto_lock(lock_);
  938. // Drop any requests to output if we are resetting or being destroyed.
  939. if (state_ == kResetting || state_ == kDestroying)
  940. return;
  941. }
  942. pending_output_cbs_.push(base::BindOnce(
  943. &VaapiVideoDecodeAccelerator::OutputPicture, weak_this_,
  944. std::move(dec_surface), bitstream_id, visible_rect, color_space));
  945. TryOutputPicture();
  946. }
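// CreateSurface() hands a free VA surface to the decoder. Outside of
// BufferAllocationMode::kNone it simply pops |available_va_surfaces_|; in
// kNone mode it must additionally find a surface whose owning PictureBuffer
// is still in |available_picture_buffers_|, since decoding happens directly
// into the client's buffers.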
  947. scoped_refptr<VASurface> VaapiVideoDecodeAccelerator::CreateSurface() {
  948. DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  949. base::AutoLock auto_lock(lock_);
  950. if (available_va_surfaces_.empty())
  951. return nullptr;
  952. DCHECK_NE(VA_INVALID_ID, va_surface_format_);
  953. DCHECK(!awaiting_va_surfaces_recycle_);
  954. if (buffer_allocation_mode_ != BufferAllocationMode::kNone) {
  955. auto va_surface_id = std::move(available_va_surfaces_.front());
  956. const VASurfaceID id = va_surface_id->id();
  957. available_va_surfaces_.pop_front();
  958. TRACE_COUNTER_ID2("media,gpu", "Vaapi VASurfaceIDs", this, "used",
  959. (IsBufferAllocationModeReducedOrSuperReduced()
  960. ? requested_num_reference_frames_
  961. : pictures_.size()) -
  962. available_va_surfaces_.size(),
  963. "available", available_va_surfaces_.size());
  964. return new VASurface(
  965. id, requested_pic_size_, va_surface_format_,
  966. base::BindOnce(va_surface_recycle_cb_, std::move(va_surface_id)));
  967. }
968. // Find the first id in |available_va_surfaces_| whose associated
969. // |pictures_| entry is also in |available_picture_buffers_|. In practice,
970. // we will quickly find an available |va_surface_id|.
  971. for (auto it = available_va_surfaces_.begin();
  972. it != available_va_surfaces_.end(); ++it) {
  973. const VASurfaceID va_surface_id = (*it)->id();
  974. for (const auto& id_and_picture : pictures_) {
  975. if (id_and_picture.second->va_surface_id() == va_surface_id &&
  976. base::Contains(available_picture_buffers_, id_and_picture.first)) {
977. // Remove |va_surface_id| from the list of available surfaces, and use
978. // the id to return a new VASurface.
  979. auto va_surface = std::move(*it);
  980. available_va_surfaces_.erase(it);
  981. return new VASurface(
  982. va_surface_id, requested_pic_size_, va_surface_format_,
  983. base::BindOnce(va_surface_recycle_cb_, std::move(va_surface)));
  984. }
  985. }
  986. }
  987. return nullptr;
  988. }
  989. void VaapiVideoDecodeAccelerator::RecycleVASurface(
  990. std::unique_ptr<ScopedVASurfaceID> va_surface,
  991. // We don't use |va_surface_id| but it must be here because this method is
  992. // bound as VASurface::ReleaseCB.
  993. VASurfaceID /*va_surface_id*/) {
  994. DCHECK(task_runner_->BelongsToCurrentThread());
  995. {
  996. base::AutoLock auto_lock(lock_);
  997. available_va_surfaces_.push_back(std::move(va_surface));
  998. if (buffer_allocation_mode_ != BufferAllocationMode::kNone) {
  999. TRACE_COUNTER_ID2("media,gpu", "Vaapi VASurfaceIDs", this, "used",
  1000. (IsBufferAllocationModeReducedOrSuperReduced()
  1001. ? requested_num_reference_frames_
  1002. : pictures_.size()) -
  1003. available_va_surfaces_.size(),
  1004. "available", available_va_surfaces_.size());
  1005. }
  1006. surfaces_available_.Signal();
  1007. }
  1008. TryOutputPicture();
  1009. }
  1010. // static
  1011. VideoDecodeAccelerator::SupportedProfiles
  1012. VaapiVideoDecodeAccelerator::GetSupportedProfiles() {
  1013. VideoDecodeAccelerator::SupportedProfiles profiles =
  1014. VaapiWrapper::GetSupportedDecodeProfiles();
1015. // VaVDA never supported VP9 Profile 2, AV1, or HEVC, but VaapiWrapper does.
1016. // Filter them out.
  1017. base::EraseIf(profiles, [](const auto& profile) {
  1018. VideoCodec codec = VideoCodecProfileToVideoCodec(profile.profile);
  1019. return profile.profile == VP9PROFILE_PROFILE2 ||
  1020. codec == VideoCodec::kAV1 || codec == VideoCodec::kHEVC;
  1021. });
  1022. return profiles;
  1023. }
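// Rough meaning of the BufferAllocationMode values, as a summary of the logic
// below and the comments in InitiateSurfaceSetChange():
// - kNone: decode directly into the client's PictureBuffers, skipping the Vpp
//   copy; only chosen on Gemini Lake or later for VP8, VP9 Profile 0 and
//   H.264.
// - kNormal: ask the client for GetRequiredNumOfPictures() (plus extras)
//   PictureBuffers and allocate as many internal surfaces; used for IMPORT
//   |output_mode_|.
// - kReduced / kSuperReduced: allocate only about GetNumReferenceFrames() + 1
//   internal surfaces and ask the client for correspondingly fewer
//   PictureBuffers.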
  1024. VaapiVideoDecodeAccelerator::BufferAllocationMode
  1025. VaapiVideoDecodeAccelerator::DecideBufferAllocationMode() {
  1026. #if BUILDFLAG(USE_VAAPI_X11)
  1027. // The IMPORT mode is used for Android on Chrome OS, so this doesn't apply
  1028. // here.
  1029. DCHECK_NE(output_mode_, VideoDecodeAccelerator::Config::OutputMode::IMPORT);
  1030. // TODO(crbug/1116701): get video decode acceleration working with ozone.
  1031. // For H.264 on older devices, another +1 is experimentally needed for
  1032. // high-to-high resolution changes.
  1033. // TODO(mcasas): Figure out why and why only H264, see crbug.com/912295 and
  1034. // http://crrev.com/c/1363807/9/media/gpu/h264_decoder.cc#1449.
  1035. if (profile_ >= H264PROFILE_MIN && profile_ <= H264PROFILE_MAX)
  1036. return BufferAllocationMode::kReduced;
  1037. return BufferAllocationMode::kSuperReduced;
  1038. #else
  1039. // TODO(crbug.com/912295): Enable a better BufferAllocationMode for IMPORT
  1040. // |output_mode_| as well.
  1041. if (output_mode_ == VideoDecodeAccelerator::Config::OutputMode::IMPORT)
  1042. return BufferAllocationMode::kNormal;
  1043. // On Gemini Lake, Kaby Lake and later we can pass to libva the client's
  1044. // PictureBuffers to decode onto, which skips the use of the Vpp unit and its
  1045. // associated format reconciliation copy, avoiding all internal buffer
  1046. // allocations.
  1047. // TODO(crbug.com/911754): Enable for VP9 Profile 2.
  1048. if (IsGeminiLakeOrLater() &&
  1049. (profile_ == VP9PROFILE_PROFILE0 || profile_ == VP8PROFILE_ANY ||
  1050. (profile_ >= H264PROFILE_MIN && profile_ <= H264PROFILE_MAX))) {
  1051. // Add one to the reference frames for the one being currently egressed, and
  1052. // an extra allocation for both |client_| and |decoder_|, see
  1053. // crrev.com/c/1576560.
  1054. if (profile_ == VP8PROFILE_ANY)
  1055. num_extra_pics_ = 3;
  1056. return BufferAllocationMode::kNone;
  1057. }
  1058. // For H.264 on older devices, another +1 is experimentally needed for
  1059. // high-to-high resolution changes.
  1060. // TODO(mcasas): Figure out why and why only H264, see crbug.com/912295 and
  1061. // http://crrev.com/c/1363807/9/media/gpu/h264_decoder.cc#1449.
  1062. if (profile_ >= H264PROFILE_MIN && profile_ <= H264PROFILE_MAX)
  1063. return BufferAllocationMode::kReduced;
1064. // If we're here, we have to use the Vpp unit and allocate buffers for
1065. // |decoder_|; instead of allocating the full |decoder_|'s
1066. // GetRequiredNumOfPictures() internally, we can allocate just |decoder_|'s
1067. // GetNumReferenceFrames() + 1. Moreover, we also request the |client_| to
1068. // allocate fewer than the usual |decoder_|'s GetRequiredNumOfPictures().
  1069. return BufferAllocationMode::kSuperReduced;
  1070. #endif
  1071. }
  1072. bool VaapiVideoDecodeAccelerator::IsBufferAllocationModeReducedOrSuperReduced()
  1073. const {
  1074. return buffer_allocation_mode_ == BufferAllocationMode::kSuperReduced ||
  1075. buffer_allocation_mode_ == BufferAllocationMode::kReduced;
  1076. }
  1077. bool VaapiVideoDecodeAccelerator::OnMemoryDump(
  1078. const base::trace_event::MemoryDumpArgs& args,
  1079. base::trace_event::ProcessMemoryDump* pmd) {
  1080. using base::trace_event::MemoryAllocatorDump;
  1081. base::AutoLock auto_lock(lock_);
  1082. if (buffer_allocation_mode_ == BufferAllocationMode::kNone ||
  1083. !requested_num_reference_frames_) {
  1084. return false;
  1085. }
  1086. auto dump_name = base::StringPrintf("gpu/vaapi/decoder/0x%" PRIxPTR,
  1087. reinterpret_cast<uintptr_t>(this));
  1088. MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name);
  1089. constexpr float kNumBytesPerPixelYUV420 = 12.0 / 8;
  1090. constexpr float kNumBytesPerPixelYUV420_10bpp = 2 * kNumBytesPerPixelYUV420;
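// YUV 4:2:0 carries 12 bits per pixel, i.e. 1.5 bytes; the 10 bpp variant is
// assumed here to be stored with two-byte samples, hence twice that.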
  1091. DCHECK(va_surface_format_ == VA_RT_FORMAT_YUV420 ||
  1092. va_surface_format_ == VA_RT_FORMAT_YUV420_10BPP);
  1093. const float va_surface_bytes_per_pixel =
  1094. va_surface_format_ == VA_RT_FORMAT_YUV420 ? kNumBytesPerPixelYUV420
  1095. : kNumBytesPerPixelYUV420_10bpp;
  1096. // Report |requested_num_surfaces| and the associated memory size. The
  1097. // calculated size is an estimation since we don't know the internal VA
  1098. // strides, texture compression, headers, etc, but is a good lower boundary.
  1099. const size_t requested_num_surfaces =
  1100. IsBufferAllocationModeReducedOrSuperReduced()
  1101. ? requested_num_reference_frames_
  1102. : pictures_.size();
  1103. dump->AddScalar(MemoryAllocatorDump::kNameSize,
  1104. MemoryAllocatorDump::kUnitsBytes,
  1105. static_cast<uint64_t>(requested_num_surfaces *
  1106. requested_pic_size_.GetArea() *
  1107. va_surface_bytes_per_pixel));
  1108. dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
  1109. MemoryAllocatorDump::kUnitsObjects,
  1110. static_cast<uint64_t>(requested_num_surfaces));
  1111. return true;
  1112. }
  1113. } // namespace media