av1_decoder.cc

// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/gpu/av1_decoder.h"

#include <bitset>

#include "base/callback_helpers.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_functions.h"
#include "base/stl_util.h"
#include "media/base/limits.h"
#include "media/gpu/av1_picture.h"
#include "third_party/libgav1/src/src/decoder_state.h"
#include "third_party/libgav1/src/src/gav1/status_code.h"
#include "third_party/libgav1/src/src/utils/constants.h"

namespace media {
namespace {
// (Section 6.4.1):
//
// - "An operating point specifies which spatial and temporal layers should be
//   decoded."
//
// - "The order of operating points indicates the preferred order for producing
//   an output: a decoder should select the earliest operating point in the
//   list that meets its decoding capabilities as expressed by the level
//   associated with each operating point."
//
// For simplicity, we always select operating point 0 and will validate that it
// doesn't have scalability information.
constexpr unsigned int kDefaultOperatingPoint = 0;

// Conversion function from libgav1 profiles to media::VideoCodecProfile.
VideoCodecProfile AV1ProfileToVideoCodecProfile(
    libgav1::BitstreamProfile profile) {
  switch (profile) {
    case libgav1::kProfile0:
      return AV1PROFILE_PROFILE_MAIN;
    case libgav1::kProfile1:
      return AV1PROFILE_PROFILE_HIGH;
    case libgav1::kProfile2:
      return AV1PROFILE_PROFILE_PRO;
    default:
      // ObuParser::ParseSequenceHeader() validates the profile.
      NOTREACHED() << "Invalid profile: " << base::strict_cast<int>(profile);
      return AV1PROFILE_PROFILE_MAIN;
  }
}

// Returns true iff the sequence has spatial or temporal scalability
// information for the selected operating point.
bool SequenceUsesScalability(int operating_point_idc) {
  return operating_point_idc != 0;
}

bool IsValidBitDepth(uint8_t bit_depth, VideoCodecProfile profile) {
  // Spec 6.4.1.
  switch (profile) {
    case AV1PROFILE_PROFILE_MAIN:
    case AV1PROFILE_PROFILE_HIGH:
      return bit_depth == 8u || bit_depth == 10u;
    case AV1PROFILE_PROFILE_PRO:
      return bit_depth == 8u || bit_depth == 10u || bit_depth == 12u;
    default:
      NOTREACHED();
      return false;
  }
}

VideoChromaSampling GetAV1ChromaSampling(
    const libgav1::ColorConfig& color_config) {
  // Spec section 6.4.2
  int8_t subsampling_x = color_config.subsampling_x;
  int8_t subsampling_y = color_config.subsampling_y;
  bool monochrome = color_config.is_monochrome;
  if (monochrome) {
    return VideoChromaSampling::k400;
  } else {
    if (subsampling_x == 0 && subsampling_y == 0) {
      return VideoChromaSampling::k444;
    } else if (subsampling_x == 1u && subsampling_y == 0) {
      return VideoChromaSampling::k422;
    } else if (subsampling_x == 1u && subsampling_y == 1u) {
      return VideoChromaSampling::k420;
    } else {
      DLOG(WARNING) << "Unknown chroma sampling format.";
      return VideoChromaSampling::kUnknown;
    }
  }
}
}  // namespace

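// Note: |buffer_pool_| is created with null frame buffer callbacks, which
// lets libgav1 use its default internal frame buffer allocation. The pool and
// |state_| are handed to libgav1::ObuParser in SetStream() and back the
// frames the parser produces.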
AV1Decoder::AV1Decoder(std::unique_ptr<AV1Accelerator> accelerator,
                       VideoCodecProfile profile,
                       const VideoColorSpace& container_color_space)
    : buffer_pool_(std::make_unique<libgav1::BufferPool>(
          /*on_frame_buffer_size_changed=*/nullptr,
          /*get_frame_buffer=*/nullptr,
          /*release_frame_buffer=*/nullptr,
          /*callback_private_data=*/nullptr)),
      state_(std::make_unique<libgav1::DecoderState>()),
      accelerator_(std::move(accelerator)),
      profile_(profile),
      container_color_space_(container_color_space) {
  ref_frames_.fill(nullptr);
}

AV1Decoder::~AV1Decoder() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // |buffer_pool_| checks that all the allocated frames are released in its
  // dtor. Explicitly destruct |state_| before |buffer_pool_| to release frames
  // in |reference_frame| in |state_|.
  state_.reset();
}

bool AV1Decoder::Flush() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DVLOG(2) << "Decoder flush";
  Reset();
  return true;
}

void AV1Decoder::Reset() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  ClearCurrentFrame();
  // We must reset the |current_sequence_header_| to ensure we don't try to
  // decode frames using an incorrect sequence header. If the first
  // DecoderBuffer after the reset doesn't contain a sequence header, we'll
  // just skip it and will keep skipping until we get a sequence header.
  current_sequence_header_.reset();
  stream_id_ = 0;
  stream_ = nullptr;
  stream_size_ = 0;
  on_error_ = false;
  state_ = std::make_unique<libgav1::DecoderState>();
  ClearReferenceFrames();
  parser_.reset();
  decrypt_config_.reset();
  buffer_pool_ = std::make_unique<libgav1::BufferPool>(
      /*on_frame_buffer_size_changed=*/nullptr,
      /*get_frame_buffer=*/nullptr,
      /*release_frame_buffer=*/nullptr,
      /*callback_private_data=*/nullptr);
}

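// A new libgav1::ObuParser is created for every input DecoderBuffer. The
// cached |current_sequence_header_| (if any) is handed to the new parser so
// buffers that carry no sequence header OBU can still be parsed, and the
// buffer's DecryptConfig is cloned for the pictures produced from it.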
void AV1Decoder::SetStream(int32_t id, const DecoderBuffer& decoder_buffer) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  stream_id_ = id;
  stream_ = decoder_buffer.data();
  stream_size_ = decoder_buffer.data_size();
  ClearCurrentFrame();
  parser_ = base::WrapUnique(new (std::nothrow) libgav1::ObuParser(
      decoder_buffer.data(), decoder_buffer.data_size(), kDefaultOperatingPoint,
      buffer_pool_.get(), state_.get()));
  if (!parser_) {
    on_error_ = true;
    return;
  }
  if (current_sequence_header_)
    parser_->set_sequence_header(*current_sequence_header_);
  if (decoder_buffer.decrypt_config())
    decrypt_config_ = decoder_buffer.decrypt_config()->Clone();
  else
    decrypt_config_.reset();
}

void AV1Decoder::ClearCurrentFrame() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  current_frame_.reset();
  current_frame_header_.reset();
  pending_pic_.reset();
}

AcceleratedVideoDecoder::DecodeResult AV1Decoder::Decode() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (on_error_)
    return kDecodeError;
  auto result = DecodeInternal();
  on_error_ = result == kDecodeError;
  return result;
}

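// Main decoding loop: parses one frame at a time from the current stream and
// either (1) resubmits a picture the accelerator previously returned
// kTryAgain for, (2) surfaces kConfigChange when a new sequence header
// changes the dimensions, profile or bit depth, (3) re-outputs a duplicated
// reference frame for show_existing_frame, or (4) submits the parsed frame to
// the accelerator. Returns kRanOutOfStreamData once the buffer is exhausted.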
AcceleratedVideoDecoder::DecodeResult AV1Decoder::DecodeInternal() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (!parser_) {
    DLOG(ERROR) << "Decode() is called before SetStream()";
    return kDecodeError;
  }
  while (parser_->HasData() || current_frame_header_) {
    base::ScopedClosureRunner clear_current_frame(
        base::BindOnce(&AV1Decoder::ClearCurrentFrame, base::Unretained(this)));
    if (pending_pic_) {
      const AV1Accelerator::Status status = DecodeAndOutputPicture(
          std::move(pending_pic_), parser_->tile_buffers());
      if (status == AV1Accelerator::Status::kFail)
        return kDecodeError;
      if (status == AV1Accelerator::Status::kTryAgain) {
        clear_current_frame.ReplaceClosure(base::DoNothing());
        return kTryAgain;
      }
      // Continue so that we force |clear_current_frame| to run before moving
      // on.
      continue;
    }
    if (!current_frame_header_) {
      libgav1::StatusCode status_code = parser_->ParseOneFrame(&current_frame_);
      if (status_code != libgav1::kStatusOk) {
        DLOG(WARNING) << "Failed to parse OBU: "
                      << libgav1::GetErrorString(status_code);
        return kDecodeError;
      }
      if (!current_frame_) {
        DLOG(WARNING) << "No frame found. Skipping the current stream";
        continue;
      }
      current_frame_header_ = parser_->frame_header();
      // Detects if a new coded video sequence is starting.
      if (parser_->sequence_header_changed()) {
        // TODO(b/171853869): Remove this check once libgav1::ObuParser does
        // this check.
        if (current_frame_header_->frame_type != libgav1::kFrameKey ||
            !current_frame_header_->show_frame ||
            current_frame_header_->show_existing_frame ||
            current_frame_->temporal_id() != 0) {
          // Section 7.5.
          DVLOG(1)
              << "The first frame successive to sequence header OBU must be a "
              << "keyframe with show_frame=1, show_existing_frame=0 and "
              << "temporal_id=0";
          return kDecodeError;
        }
        if (SequenceUsesScalability(
                parser_->sequence_header()
                    .operating_point_idc[kDefaultOperatingPoint])) {
          DVLOG(3) << "Either temporal or spatial layer decoding is not "
                   << "supported";
          return kDecodeError;
        }

        current_sequence_header_ = parser_->sequence_header();

        VideoChromaSampling new_chroma_sampling =
            GetAV1ChromaSampling(current_sequence_header_->color_config);
        if (new_chroma_sampling != chroma_sampling_) {
          chroma_sampling_ = new_chroma_sampling;
          base::UmaHistogramEnumeration(
              "Media.PlatformVideoDecoding.ChromaSampling", chroma_sampling_);
        }
        if (chroma_sampling_ != VideoChromaSampling::k420) {
          DVLOG(1) << "Only YUV 4:2:0 is supported";
          return kDecodeError;
        }

        const VideoCodecProfile new_profile =
            AV1ProfileToVideoCodecProfile(current_sequence_header_->profile);
        const uint8_t new_bit_depth = base::checked_cast<uint8_t>(
            current_sequence_header_->color_config.bitdepth);
        if (!IsValidBitDepth(new_bit_depth, new_profile)) {
          DVLOG(1) << "Invalid bit depth="
                   << base::strict_cast<int>(new_bit_depth)
                   << ", profile=" << GetProfileName(new_profile);
          return kDecodeError;
        }

        const gfx::Size new_frame_size(
            base::strict_cast<int>(current_sequence_header_->max_frame_width),
            base::strict_cast<int>(current_sequence_header_->max_frame_height));
        gfx::Rect new_visible_rect(
            base::strict_cast<int>(current_frame_header_->render_width),
            base::strict_cast<int>(current_frame_header_->render_height));
        DCHECK(!new_frame_size.IsEmpty());
        if (!gfx::Rect(new_frame_size).Contains(new_visible_rect)) {
          DVLOG(1) << "Render size exceeds picture size. render size: "
                   << new_visible_rect.ToString()
                   << ", picture size: " << new_frame_size.ToString();
          new_visible_rect = gfx::Rect(new_frame_size);
        }

        ClearReferenceFrames();
        // Issues kConfigChange only if either the dimensions, profile or bit
        // depth is changed.
        if (frame_size_ != new_frame_size ||
            visible_rect_ != new_visible_rect || profile_ != new_profile ||
            bit_depth_ != new_bit_depth) {
          frame_size_ = new_frame_size;
          visible_rect_ = new_visible_rect;
          profile_ = new_profile;
          bit_depth_ = new_bit_depth;
          clear_current_frame.ReplaceClosure(base::DoNothing());
          return kConfigChange;
        }
      }
    }
    if (!current_sequence_header_) {
      // Decoding is not doable because we haven't received a sequence header.
      // This occurs when seeking a video.
      DVLOG(3) << "Discarded the current frame because no sequence header has "
               << "been found yet";
      continue;
    }
    DCHECK(current_frame_header_);
    const auto& frame_header = *current_frame_header_;
    if (frame_header.show_existing_frame) {
      const size_t frame_to_show =
          base::checked_cast<size_t>(frame_header.frame_to_show);
      DCHECK_LE(0u, frame_to_show);
      DCHECK_LT(frame_to_show, ref_frames_.size());
      if (!CheckAndCleanUpReferenceFrames()) {
        DLOG(ERROR) << "The states of reference frames are different between "
                    << "|ref_frames_| and |state_|";
        return kDecodeError;
      }
      auto pic = ref_frames_[frame_to_show];
      CHECK(pic);
      pic = pic->Duplicate();
      if (!pic) {
        DVLOG(1) << "Failed duplication";
        return kDecodeError;
      }
      pic->set_bitstream_id(stream_id_);
      if (!accelerator_->OutputPicture(*pic)) {
        return kDecodeError;
      }
      // libgav1::ObuParser sets |current_frame_| to the frame to show while
      // |current_frame_header_| is the frame header of the currently parsed
      // frame. If |current_frame_| is a keyframe, then refresh_frame_flags must
      // be 0xff. Otherwise, refresh_frame_flags must be 0x00 (Section 5.9.2).
      DCHECK(current_frame_->frame_type() == libgav1::kFrameKey ||
             current_frame_header_->refresh_frame_flags == 0x00);
      DCHECK(current_frame_->frame_type() != libgav1::kFrameKey ||
             current_frame_header_->refresh_frame_flags == 0xff);
      UpdateReferenceFrames(std::move(pic));
      continue;
    }
    if (parser_->tile_buffers().empty()) {
      // The last call to ParseOneFrame() didn't actually have any tile groups.
      // This could happen in rare cases (for example, if there is a Metadata
      // OBU after the TileGroup OBU). Ignore this case.
      continue;
    }
    const gfx::Size current_frame_size(
        base::strict_cast<int>(frame_header.width),
        base::strict_cast<int>(frame_header.height));
    if (current_frame_size != frame_size_) {
      // TODO(hiroh): This must be handled in decoding spatial layer.
      DVLOG(1) << "Resolution change in the middle of video sequence (i.e."
               << " between sequence headers) is not supported";
      return kDecodeError;
    }
    if (current_frame_size.width() !=
        base::strict_cast<int>(frame_header.upscaled_width)) {
      DVLOG(1) << "Super resolution is not supported";
      return kDecodeError;
    }
    const gfx::Rect current_visible_rect(
        base::strict_cast<int>(frame_header.render_width),
        base::strict_cast<int>(frame_header.render_height));
    if (current_visible_rect != visible_rect_) {
      // TODO(andrescj): Handle the visible rectangle change in the middle of
      // video sequence.
  343. DVLOG(1) << "Visible rectangle change in the middle of video sequence"
  344. << "(i.e. between sequence headers) is not supported";
      return kDecodeError;
    }
    DCHECK(current_sequence_header_->film_grain_params_present ||
           !frame_header.film_grain_params.apply_grain);
    auto pic = accelerator_->CreateAV1Picture(
        frame_header.film_grain_params.apply_grain);
    if (!pic) {
      clear_current_frame.ReplaceClosure(base::DoNothing());
      return kRanOutOfSurfaces;
    }
    pic->set_visible_rect(current_visible_rect);
    pic->set_bitstream_id(stream_id_);

    // For AV1, prefer the frame color space over the config.
    const auto& cc = current_sequence_header_->color_config;
    const auto cs = VideoColorSpace(
        cc.color_primary, cc.transfer_characteristics, cc.matrix_coefficients,
        cc.color_range == libgav1::kColorRangeStudio
            ? gfx::ColorSpace::RangeID::LIMITED
            : gfx::ColorSpace::RangeID::FULL);
    if (cs.IsSpecified())
      pic->set_colorspace(cs);
    else if (container_color_space_.IsSpecified())
      pic->set_colorspace(container_color_space_);

    pic->frame_header = frame_header;
    if (decrypt_config_)
      pic->set_decrypt_config(decrypt_config_->Clone());
    const AV1Accelerator::Status status =
        DecodeAndOutputPicture(std::move(pic), parser_->tile_buffers());
    if (status == AV1Accelerator::Status::kFail)
      return kDecodeError;
    if (status == AV1Accelerator::Status::kTryAgain) {
      clear_current_frame.ReplaceClosure(base::DoNothing());
      return kTryAgain;
    }
  }
  return kRanOutOfStreamData;
}

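// Stores |pic| in every reference frame slot whose bit is set in
// refresh_frame_flags (Spec 5.9.2) and mirrors the same update into libgav1's
// DecoderState so that |ref_frames_| and |state_| stay in sync.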
void AV1Decoder::UpdateReferenceFrames(scoped_refptr<AV1Picture> pic) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(state_);
  DCHECK(current_frame_header_);
  const uint8_t refresh_frame_flags =
      current_frame_header_->refresh_frame_flags;
  const std::bitset<libgav1::kNumReferenceFrameTypes> update_reference_frame(
      refresh_frame_flags);
  for (size_t i = 0; i < libgav1::kNumReferenceFrameTypes; ++i) {
    if (update_reference_frame[i])
      ref_frames_[i] = pic;
  }
  state_->UpdateReferenceFrames(current_frame_,
                                base::strict_cast<int>(refresh_frame_flags));
}

void AV1Decoder::ClearReferenceFrames() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(state_);
  ref_frames_.fill(nullptr);
  // If AV1Decoder has decided to clear the reference frames, then ObuParser
  // must have also decided to do so.
  DCHECK_EQ(base::STLCount(state_->reference_frame, nullptr),
            static_cast<int>(state_->reference_frame.size()));
}

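// Reconciles |ref_frames_| with libgav1's view in |state_|: drops pictures
// libgav1 no longer tracks, fails if libgav1 tracks a frame we don't have,
// and asserts that an inter frame's reference_frame_index entries all point
// at valid pictures.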
bool AV1Decoder::CheckAndCleanUpReferenceFrames() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(state_);
  DCHECK(current_frame_header_);
  for (size_t i = 0; i < libgav1::kNumReferenceFrameTypes; ++i) {
    if (state_->reference_frame[i] && !ref_frames_[i])
      return false;
    if (!state_->reference_frame[i] && ref_frames_[i])
      ref_frames_[i].reset();
  }
  // If we get here, we know |ref_frames_| includes all and only those frames
  // that can be currently used as reference frames. Now we'll assert that for
  // non-intra frames, all the necessary reference frames are in |ref_frames_|.
  // For intra frames, we don't need this assertion because they shouldn't
  // depend on reference frames.
  if (!libgav1::IsIntraFrame(current_frame_header_->frame_type)) {
    for (size_t i = 0; i < libgav1::kNumInterReferenceFrameTypes; ++i) {
      const auto ref_frame_index =
          current_frame_header_->reference_frame_index[i];
      // Unless an error occurred in libgav1, |ref_frame_index| should be valid,
      // and since CheckAndCleanUpReferenceFrames() only gets called if parsing
      // succeeded, we can assert that validity.
      CHECK_GE(ref_frame_index, 0);
      CHECK_LT(ref_frame_index, libgav1::kNumReferenceFrameTypes);
      CHECK(ref_frames_[ref_frame_index]);
    }
  }
  // If we get here, we know that all the reference frames needed by the current
  // frame are in |ref_frames_|.
  return true;
}

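// Submits |pic| and the tile buffers to the accelerator. On kTryAgain the
// picture is stashed in |pending_pic_| so a later Decode() call can resubmit
// it; on success the picture is output (if show_frame) and then recorded as a
// reference frame.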
AV1Decoder::AV1Accelerator::Status AV1Decoder::DecodeAndOutputPicture(
    scoped_refptr<AV1Picture> pic,
    const libgav1::Vector<libgav1::TileBuffer>& tile_buffers) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(pic);
  DCHECK(current_sequence_header_);
  DCHECK(stream_);
  DCHECK_GT(stream_size_, 0u);
  if (!CheckAndCleanUpReferenceFrames()) {
    DLOG(ERROR) << "The states of reference frames are different between "
                << "|ref_frames_| and |state_|";
    return AV1Accelerator::Status::kFail;
  }
  const AV1Accelerator::Status status = accelerator_->SubmitDecode(
      *pic, *current_sequence_header_, ref_frames_, tile_buffers,
      base::make_span(stream_, stream_size_));
  if (status != AV1Accelerator::Status::kOk) {
    if (status == AV1Accelerator::Status::kTryAgain)
      pending_pic_ = std::move(pic);
    return status;
  }
  if (pic->frame_header.show_frame && !accelerator_->OutputPicture(*pic))
    return AV1Accelerator::Status::kFail;
  // |current_frame_header_->refresh_frame_flags| should be 0xff if the frame is
  // either a SWITCH_FRAME or a visible KEY_FRAME (Spec 5.9.2).
  DCHECK(!(current_frame_header_->frame_type == libgav1::kFrameSwitch ||
           (current_frame_header_->frame_type == libgav1::kFrameKey &&
            current_frame_header_->show_frame)) ||
         current_frame_header_->refresh_frame_flags == 0xff);
  UpdateReferenceFrames(std::move(pic));
  return AV1Accelerator::Status::kOk;
}

gfx::Size AV1Decoder::GetPicSize() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // TODO(hiroh): It should be safer to align this by 64 or 128 (depending on
  // use_128x128_superblock) so that a driver doesn't touch out of the buffer.
  return frame_size_;
}

gfx::Rect AV1Decoder::GetVisibleRect() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  return visible_rect_;
}

VideoCodecProfile AV1Decoder::GetProfile() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  return profile_;
}

uint8_t AV1Decoder::GetBitDepth() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  return bit_depth_;
}

VideoChromaSampling AV1Decoder::GetChromaSampling() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  return chroma_sampling_;
}

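// Note: when film_grain_params_present is set, the required picture count is
// doubled; the assumption is that the accelerator may need a second surface
// per frame to hold the film-grain-applied output.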
size_t AV1Decoder::GetRequiredNumOfPictures() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  constexpr size_t kPicsInPipeline = limits::kMaxVideoFrames + 1;
  DCHECK(current_sequence_header_);
  return (kPicsInPipeline + GetNumReferenceFrames()) *
         (1 + current_sequence_header_->film_grain_params_present);
}

size_t AV1Decoder::GetNumReferenceFrames() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  return libgav1::kNumReferenceFrameTypes;
}

}  // namespace media