
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>
#include <limits>

#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/strings/string_number_conversions.h"
#include "media/base/media_switches.h"
#include "media/base/media_util.h"
#include "media/base/test_data_util.h"
#include "media/base/video_bitrate_allocation.h"
#include "media/base/video_codecs.h"
#include "media/base/video_decoder_config.h"
#include "media/gpu/buildflags.h"
#include "media/gpu/gpu_video_encode_accelerator_helpers.h"
#include "media/gpu/test/video.h"
#include "media/gpu/test/video_encoder/bitstream_file_writer.h"
#include "media/gpu/test/video_encoder/bitstream_validator.h"
#include "media/gpu/test/video_encoder/decoder_buffer_validator.h"
#include "media/gpu/test/video_encoder/video_encoder.h"
#include "media/gpu/test/video_encoder/video_encoder_client.h"
#include "media/gpu/test/video_encoder/video_encoder_test_environment.h"
#include "media/gpu/test/video_frame_file_writer.h"
#include "media/gpu/test/video_frame_helpers.h"
#include "media/gpu/test/video_frame_validator.h"
#include "media/gpu/test/video_test_environment.h"
#include "media/gpu/test/video_test_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

namespace media {
namespace test {

namespace {

// Video encoder tests usage message. Make sure to also update the
// documentation under docs/media/gpu/video_encoder_test_usage.md when making
// changes here.
constexpr const char* usage_msg =
    R"(usage: video_encode_accelerator_tests
           [--codec=<codec>] [--num_temporal_layers=<number>]
           [--num_spatial_layers=<number>] [--bitrate_mode=(cbr|vbr)]
           [--reverse] [--disable_validator] [--output_bitstream]
           [--output_images=(all|corrupt)] [--output_format=(png|yuv)]
           [--output_folder=<filepath>] [--output_limit=<number>]
           [--disable_vaapi_lock]
           [-v=<level>] [--vmodule=<config>]
           [--gtest_help] [--help]
           [<video path>] [<video metadata path>]
)";

// Video encoder tests help message.
constexpr const char* help_msg =
    R"""(Run the video encoder accelerator tests on the video specified by
<video path>. If no <video path> is given the default
"bear_320x192_40frames.yuv.webm" video will be used.

The <video metadata path> should specify the location of a json file
containing the video's metadata, such as frame checksums. By default
<video path>.json will be used.

The following arguments are supported:
   -v                    enable verbose mode, e.g. -v=2.
  --vmodule              enable verbose mode for the specified module,
                         e.g. --vmodule=*media/gpu*=2.

  --codec                codec profile to encode, one of "h264" (baseline),
                         "h264main", "h264high", "vp8" and "vp9".
                         H264 Baseline is selected if unspecified.
  --num_temporal_layers  the number of temporal layers of the encoded
                         bitstream. The default value is 1.
  --num_spatial_layers   the number of spatial layers of the encoded
                         bitstream. Only used with --codec=vp9 currently.
                         Spatial SVC encoding is applied only in
                         NV12Dmabuf test cases.
  --bitrate_mode         the rate control mode for encoding, one of "cbr"
                         (default) or "vbr".
  --reverse              play the stream backwards once the end of stream is
                         reached, so that the input stream to be encoded is
                         continuous. By default this is false.
  --disable_validator    disable validation of the encoded bitstream.

  --output_bitstream     save the output bitstream in either H264 AnnexB
                         format (for H264) or IVF format (for vp8 and
                         vp9) to <output_folder>/<testname>.
  --output_images        in addition to saving the full encoded bitstream,
                         it's also possible to dump individual frames to
                         <output_folder>/<testname>; possible values are
                         "all|corrupt".
  --output_format        set the format of images saved to disk,
                         supported formats are "png" (default) and
                         "yuv".
  --output_limit         limit the number of images saved to disk.
  --output_folder        set the base folder used to store test
                         artifacts. The default is the current directory.

  --disable_vaapi_lock   disable the global VA-API lock if applicable,
                         i.e., only on devices that use the VA-API with a
                         libva backend that's known to be thread-safe and
                         only in portions of the Chrome stack that should
                         be able to deal with the absence of the lock
                         (not the VaapiVideoDecodeAccelerator).

  --gtest_help           display the gtest help and exit.
  --help                 display this help and exit.
)""";

// Default video to be used if no test video was specified.
constexpr base::FilePath::CharType kDefaultTestVideoPath[] =
    FILE_PATH_LITERAL("bear_320x192_40frames.yuv.webm");

// The number of frames to encode for bitrate check test cases.
// TODO(hiroh): Decrease this value to make the test faster.
constexpr size_t kNumFramesToEncodeForBitrateCheck = 300;
// Tolerance factor for how much the encoded bitrate can differ from the
// requested bitrate.
constexpr double kBitrateTolerance = 0.1;
constexpr double kVariableBitrateTolerance = 0.3;
// The event timeout used in bitrate check tests because encoding 2160p and
// validating |kNumFramesToEncodeForBitrateCheck| frames take a long time.
constexpr base::TimeDelta kBitrateCheckEventTimeout = base::Seconds(180);
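
// Global test environment, created in main() and owned by gtest once it has
// been registered through testing::AddGlobalTestEnvironment().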
media::test::VideoEncoderTestEnvironment* g_env;

// Video encode test class. Performs setup and teardown for each single test.
class VideoEncoderTest : public ::testing::Test {
 public:
  // GetDefaultConfig() creates VideoEncoderClientConfig for SharedMemory input
  // encoding. This function must not be called in spatial SVC encoding.
  VideoEncoderClientConfig GetDefaultConfig() {
    const auto& spatial_layers = g_env->SpatialLayers();
    CHECK_LE(spatial_layers.size(), 1u);
    return VideoEncoderClientConfig(g_env->Video(), g_env->Profile(),
                                    spatial_layers, g_env->BitrateAllocation(),
                                    g_env->Reverse());
  }

  std::unique_ptr<VideoEncoder> CreateVideoEncoder(
      Video* video,
      const VideoEncoderClientConfig& config) {
    LOG_ASSERT(video);
    auto video_encoder =
        VideoEncoder::Create(config, CreateBitstreamProcessors(video, config));
    LOG_ASSERT(video_encoder);
    if (!video_encoder->Initialize(video))
      ADD_FAILURE();
    return video_encoder;
  }

 private:
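  // Creates a BitstreamValidator that decodes the encoded stream with a
  // software decoder and runs the attached video frame processors (optional
  // image writer and SSIM validator) on the decoded frames.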
  std::unique_ptr<BitstreamProcessor> CreateBitstreamValidator(
      const Video* video,
      const VideoDecoderConfig& decoder_config,
      const size_t last_frame_index,
      VideoFrameValidator::GetModelFrameCB get_model_frame_cb,
      absl::optional<size_t> spatial_layer_index_to_decode,
      absl::optional<size_t> temporal_layer_index_to_decode,
      const std::vector<gfx::Size>& spatial_layer_resolutions) {
    std::vector<std::unique_ptr<VideoFrameProcessor>> video_frame_processors;

    // Attach a video frame writer to store individual frames to disk if
    // requested.
    std::unique_ptr<VideoFrameProcessor> image_writer;
    auto frame_output_config = g_env->ImageOutputConfig();
    base::FilePath output_folder = base::FilePath(g_env->OutputFolder())
                                       .Append(g_env->GetTestOutputFilePath());
    if (frame_output_config.output_mode != FrameOutputMode::kNone) {
      base::FilePath::StringType output_file_prefix;
      if (spatial_layer_index_to_decode) {
        output_file_prefix +=
            FILE_PATH_LITERAL("SL") +
            base::NumberToString(*spatial_layer_index_to_decode);
      }
      if (temporal_layer_index_to_decode) {
        output_file_prefix +=
            FILE_PATH_LITERAL("TL") +
            base::NumberToString(*temporal_layer_index_to_decode);
      }
      image_writer = VideoFrameFileWriter::Create(
          output_folder, frame_output_config.output_format,
          frame_output_config.output_limit, output_file_prefix);
      LOG_ASSERT(image_writer);
      if (frame_output_config.output_mode == FrameOutputMode::kAll)
        video_frame_processors.push_back(std::move(image_writer));
    }

    // For a resolution less than 360p, we lower the tolerance. Some platforms
    // can't compress a low-resolution video efficiently at a low bitrate.
    constexpr gfx::Size k360p(640, 360);
    constexpr double kSSIMToleranceForLowerResolution = 0.65;
    const gfx::Size encode_resolution = decoder_config.visible_rect().size();
    const double ssim_tolerance =
        encode_resolution.GetArea() < k360p.GetArea()
            ? kSSIMToleranceForLowerResolution
            : SSIMVideoFrameValidator::kDefaultTolerance;

    auto ssim_validator = SSIMVideoFrameValidator::Create(
        get_model_frame_cb, std::move(image_writer),
        VideoFrameValidator::ValidationMode::kAverage, ssim_tolerance);
    LOG_ASSERT(ssim_validator);
    video_frame_processors.push_back(std::move(ssim_validator));
    return BitstreamValidator::Create(
        decoder_config, last_frame_index, std::move(video_frame_processors),
        spatial_layer_index_to_decode, temporal_layer_index_to_decode,
        spatial_layer_resolutions);
  }
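
  // Creates the chain of bitstream processors used by a test: optional
  // bitstream file writers, a codec-specific syntax validator, and bitstream
  // validators that decode the stream and compare against the raw input.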
  std::vector<std::unique_ptr<BitstreamProcessor>> CreateBitstreamProcessors(
      Video* video,
      const VideoEncoderClientConfig& config) {
    std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors;
    const gfx::Rect visible_rect(config.output_resolution);
    std::vector<gfx::Size> spatial_layer_resolutions;
    // |config.spatial_layers| is filled only in temporal layer or spatial
    // layer encoding.
    for (const auto& sl : config.spatial_layers)
      spatial_layer_resolutions.emplace_back(sl.width, sl.height);
    const VideoCodec codec =
        VideoCodecProfileToVideoCodec(config.output_profile);
    if (g_env->SaveOutputBitstream()) {
      base::FilePath::StringPieceType extension =
          codec == VideoCodec::kH264 ? FILE_PATH_LITERAL("h264")
                                     : FILE_PATH_LITERAL("ivf");
      auto output_bitstream_filepath =
          g_env->OutputFolder()
              .Append(g_env->GetTestOutputFilePath())
              .Append(video->FilePath().BaseName().ReplaceExtension(extension));
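      // For SVC streams, write a separate file per (spatial layer, temporal
      // layer) pair so that each sub-stream can be inspected on its own.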
      if (!spatial_layer_resolutions.empty()) {
        CHECK_GE(config.num_spatial_layers, 1u);
        CHECK_GE(config.num_temporal_layers, 1u);
        for (size_t spatial_layer_index_to_write = 0;
             spatial_layer_index_to_write < config.num_spatial_layers;
             ++spatial_layer_index_to_write) {
          const gfx::Size& layer_size =
              spatial_layer_resolutions[spatial_layer_index_to_write];
          for (size_t temporal_layer_index_to_write = 0;
               temporal_layer_index_to_write < config.num_temporal_layers;
               ++temporal_layer_index_to_write) {
            bitstream_processors.emplace_back(BitstreamFileWriter::Create(
                output_bitstream_filepath.InsertBeforeExtensionASCII(
                    FILE_PATH_LITERAL(".SL") +
                    base::NumberToString(spatial_layer_index_to_write) +
                    FILE_PATH_LITERAL(".TL") +
                    base::NumberToString(temporal_layer_index_to_write)),
                codec, layer_size, config.framerate,
                config.num_frames_to_encode, spatial_layer_index_to_write,
                temporal_layer_index_to_write, spatial_layer_resolutions));
            LOG_ASSERT(bitstream_processors.back());
          }
        }
      } else {
        bitstream_processors.emplace_back(BitstreamFileWriter::Create(
            output_bitstream_filepath, codec, visible_rect.size(),
            config.framerate, config.num_frames_to_encode));
        LOG_ASSERT(bitstream_processors.back());
      }
    }

    if (!g_env->IsBitstreamValidatorEnabled()) {
      return bitstream_processors;
    }

    switch (codec) {
      case VideoCodec::kH264:
        bitstream_processors.emplace_back(new H264Validator(
            config.output_profile, visible_rect, config.num_temporal_layers));
        break;
      case VideoCodec::kVP8:
        bitstream_processors.emplace_back(
            new VP8Validator(visible_rect, config.num_temporal_layers));
        break;
      case VideoCodec::kVP9:
        bitstream_processors.emplace_back(new VP9Validator(
            config.output_profile, visible_rect, config.num_spatial_layers,
            config.num_temporal_layers));
        break;
      default:
        LOG(ERROR) << "Unsupported profile: "
                   << GetProfileName(config.output_profile);
        break;
    }
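
    // The raw data helper provides access to the original YUV frames; they
    // are used as model frames when validating the decoded bitstream.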
    raw_data_helper_ = RawDataHelper::Create(video, g_env->Reverse());
    if (!raw_data_helper_) {
      LOG(ERROR) << "Failed to create raw data helper";
      return bitstream_processors;
    }

    if (!spatial_layer_resolutions.empty()) {
      CHECK_GE(config.num_spatial_layers, 1u);
      CHECK_GE(config.num_temporal_layers, 1u);
      for (size_t spatial_layer_index_to_decode = 0;
           spatial_layer_index_to_decode < config.num_spatial_layers;
           ++spatial_layer_index_to_decode) {
        const gfx::Size& layer_size =
            spatial_layer_resolutions[spatial_layer_index_to_decode];
        VideoDecoderConfig decoder_config(
            codec, config.output_profile,
            VideoDecoderConfig::AlphaMode::kIsOpaque, VideoColorSpace(),
            kNoTransformation, layer_size, gfx::Rect(layer_size), layer_size,
            EmptyExtraData(), EncryptionScheme::kUnencrypted);
        VideoFrameValidator::GetModelFrameCB get_model_frame_cb =
            base::BindRepeating(&VideoEncoderTest::GetModelFrame,
                                base::Unretained(this), gfx::Rect(layer_size));
        for (size_t temporal_layer_index_to_decode = 0;
             temporal_layer_index_to_decode < config.num_temporal_layers;
             ++temporal_layer_index_to_decode) {
          bitstream_processors.emplace_back(CreateBitstreamValidator(
              video, decoder_config, config.num_frames_to_encode - 1,
              get_model_frame_cb, spatial_layer_index_to_decode,
              temporal_layer_index_to_decode, spatial_layer_resolutions));
          LOG_ASSERT(bitstream_processors.back());
        }
      }
    } else {
      // Attach a bitstream validator to validate all encoded video frames. The
      // bitstream validator uses a software video decoder to validate the
      // encoded buffers by decoding them. Metrics such as the image's SSIM can
      // be calculated for additional quality checks.
      VideoDecoderConfig decoder_config(
          codec, config.output_profile,
          VideoDecoderConfig::AlphaMode::kIsOpaque, VideoColorSpace(),
          kNoTransformation, visible_rect.size(), visible_rect,
          visible_rect.size(), EmptyExtraData(),
          EncryptionScheme::kUnencrypted);
      VideoFrameValidator::GetModelFrameCB get_model_frame_cb =
          base::BindRepeating(&VideoEncoderTest::GetModelFrame,
                              base::Unretained(this), visible_rect);
      bitstream_processors.emplace_back(CreateBitstreamValidator(
          video, decoder_config, config.num_frames_to_encode - 1,
          get_model_frame_cb, absl::nullopt, absl::nullopt,
          /*spatial_layer_resolutions=*/{}));
      LOG_ASSERT(bitstream_processors.back());
    }

    return bitstream_processors;
  }
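
  // Returns the raw input frame at |frame_index|, scaled to the size of
  // |visible_rect| when the validated stream uses a different resolution.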
  scoped_refptr<const VideoFrame> GetModelFrame(const gfx::Rect& visible_rect,
                                                size_t frame_index) {
    LOG_ASSERT(raw_data_helper_);
    auto frame = raw_data_helper_->GetFrame(frame_index);
    if (!frame)
      return nullptr;
    if (visible_rect.size() == frame->visible_rect().size())
      return frame;
    return ScaleVideoFrame(frame.get(), visible_rect.size());
  }

  std::unique_ptr<RawDataHelper> raw_data_helper_;
};

absl::optional<std::string> SupportsDynamicFramerate() {
  return g_env->IsKeplerUsed()
             ? absl::make_optional<std::string>(
                   "The rate controller in the kepler firmware doesn't handle "
                   "frame rate changes correctly.")
             : absl::nullopt;
}

absl::optional<std::string> SupportsNV12DmaBufInput() {
  return g_env->IsKeplerUsed() ? absl::make_optional<std::string>(
                                     "Encoding with dmabuf input frames is not "
                                     "supported in kepler.")
                               : absl::nullopt;
}

}  // namespace

// Encode video from start to end. Wait for the kFlushDone event at the end of
// the stream, which notifies us that all frames have been encoded.
TEST_F(VideoEncoderTest, FlushAtEndOfStream) {
  if (g_env->SpatialLayers().size() > 1)
    GTEST_SKIP() << "Skip SHMEM input test cases in spatial SVC encoding";

  auto encoder = CreateVideoEncoder(g_env->Video(), GetDefaultConfig());
  encoder->Encode();
  EXPECT_TRUE(encoder->WaitForFlushDone());

  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
  EXPECT_EQ(encoder->GetFrameReleasedCount(), g_env->Video()->NumFrames());
  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}

// Test initializing the video encoder. The test will be successful if the
// video encoder is capable of setting up the encoder for the specified codec
// and resolution. The test only verifies initialization and doesn't do any
// encoding.
TEST_F(VideoEncoderTest, Initialize) {
  if (g_env->SpatialLayers().size() > 1)
    GTEST_SKIP() << "Skip SHMEM input test cases in spatial SVC encoding";

  auto encoder = CreateVideoEncoder(g_env->Video(), GetDefaultConfig());

  EXPECT_EQ(encoder->GetEventCount(VideoEncoder::kInitialized), 1u);
}

// Create a video encoder and immediately destroy it without initializing. The
// video encoder will be automatically destroyed when the video encoder goes
// out of scope at the end of the test. The test will pass if no asserts or
// crashes are triggered upon destroying.
TEST_F(VideoEncoderTest, DestroyBeforeInitialize) {
  if (g_env->SpatialLayers().size() > 1)
    GTEST_SKIP() << "Skip SHMEM input test cases in spatial SVC encoding";

  auto video_encoder = VideoEncoder::Create(GetDefaultConfig());

  EXPECT_NE(video_encoder, nullptr);
}

// Test forcing key frames while encoding a video.
TEST_F(VideoEncoderTest, ForceKeyFrame) {
  if (g_env->SpatialLayers().size() > 1)
    GTEST_SKIP() << "Skip SHMEM input test cases in spatial SVC encoding";

  auto config = GetDefaultConfig();
  const size_t middle_frame = config.num_frames_to_encode;
  config.num_frames_to_encode *= 2;
  auto encoder = CreateVideoEncoder(g_env->Video(), config);

  // It is expected that our HW encoders don't produce key frames in a short
  // time span like a few hundred frames.
  encoder->EncodeUntil(VideoEncoder::kBitstreamReady, 1u);
  EXPECT_TRUE(encoder->WaitUntilIdle());
  EXPECT_EQ(encoder->GetEventCount(VideoEncoder::kKeyFrame), 1u);

  // Encode until the middle of the stream and request a key frame.
  encoder->EncodeUntil(VideoEncoder::kFrameReleased, middle_frame);
  EXPECT_TRUE(encoder->WaitUntilIdle());
  // Check that there is no key frame other than the first frame.
  EXPECT_EQ(encoder->GetEventCount(VideoEncoder::kKeyFrame), 1u);
  encoder->ForceKeyFrame();

  // Encode until the end of the stream.
  encoder->Encode();
  EXPECT_TRUE(encoder->WaitForFlushDone());

  // Check that there are two key frames: the first frame and the one produced
  // by ForceKeyFrame().
  EXPECT_EQ(encoder->GetEventCount(VideoEncoder::kKeyFrame), 2u);
  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
  EXPECT_EQ(encoder->GetFrameReleasedCount(), config.num_frames_to_encode);
  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}

// Encode video from start to end. Multiple buffer encodes will be queued in
// the encoder, without waiting for the result of the previous encode requests.
TEST_F(VideoEncoderTest, FlushAtEndOfStream_MultipleOutstandingEncodes) {
  if (g_env->SpatialLayers().size() > 1)
    GTEST_SKIP() << "Skip SHMEM input test cases in spatial SVC encoding";

  auto config = GetDefaultConfig();
  config.max_outstanding_encode_requests = 4;
  auto encoder = CreateVideoEncoder(g_env->Video(), config);
  encoder->Encode();
  EXPECT_TRUE(encoder->WaitForFlushDone());

  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
  EXPECT_EQ(encoder->GetFrameReleasedCount(), g_env->Video()->NumFrames());
  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}

// Encode multiple videos simultaneously from start to finish.
TEST_F(VideoEncoderTest, FlushAtEndOfStream_MultipleConcurrentEncodes) {
  if (g_env->SpatialLayers().size() > 1)
    GTEST_SKIP() << "Skip SHMEM input test cases in spatial SVC encoding";

  // Run fewer encoders for larger resolutions to avoid creating too many
  // shared memory buffers during the test on lower end devices.
  constexpr gfx::Size k1080p(1920, 1080);
  const size_t kMinSupportedConcurrentEncoders =
      g_env->Video()->Resolution().GetArea() >= k1080p.GetArea() ? 2 : 3;

  auto config = GetDefaultConfig();
  std::vector<std::unique_ptr<VideoEncoder>> encoders(
      kMinSupportedConcurrentEncoders);
  for (size_t i = 0; i < kMinSupportedConcurrentEncoders; ++i)
    encoders[i] = CreateVideoEncoder(g_env->Video(), config);

  for (size_t i = 0; i < kMinSupportedConcurrentEncoders; ++i)
    encoders[i]->Encode();

  for (size_t i = 0; i < kMinSupportedConcurrentEncoders; ++i) {
    EXPECT_TRUE(encoders[i]->WaitForFlushDone());
    EXPECT_EQ(encoders[i]->GetFlushDoneCount(), 1u);
    EXPECT_EQ(encoders[i]->GetFrameReleasedCount(),
              g_env->Video()->NumFrames());
    EXPECT_TRUE(encoders[i]->WaitForBitstreamProcessors());
  }
}

TEST_F(VideoEncoderTest, BitrateCheck) {
  if (g_env->SpatialLayers().size() > 1)
    GTEST_SKIP() << "Skip SHMEM input test cases in spatial SVC encoding";

  auto config = GetDefaultConfig();
  config.num_frames_to_encode = kNumFramesToEncodeForBitrateCheck;
  auto encoder = CreateVideoEncoder(g_env->Video(), config);
  // Set a longer event timeout than the default (30 sec) because encoding
  // 2160p and validating the stream take a long time.
  encoder->SetEventWaitTimeout(kBitrateCheckEventTimeout);

  encoder->Encode();
  EXPECT_TRUE(encoder->WaitForFlushDone());
  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
  EXPECT_EQ(encoder->GetFrameReleasedCount(), config.num_frames_to_encode);
  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());

  // TODO(b/181797390): Reconsider the bitrate check for VBR encoding if this
  // fails on some boards.
  const double tolerance =
      config.bitrate_allocation.GetMode() == Bitrate::Mode::kConstant
          ? kBitrateTolerance
          : kVariableBitrateTolerance;
  EXPECT_NEAR(encoder->GetStats().Bitrate(),
              config.bitrate_allocation.GetSumBps(),
              tolerance * config.bitrate_allocation.GetSumBps());
}

TEST_F(VideoEncoderTest, BitrateCheck_DynamicBitrate) {
  if (g_env->SpatialLayers().size() > 1)
    GTEST_SKIP() << "Skip SHMEM input test cases in spatial SVC encoding";

  if (g_env->BitrateAllocation().GetMode() != Bitrate::Mode::kConstant) {
    GTEST_SKIP()
        << "Skip dynamic bitrate change checks for non-CBR bitrate mode";
  }
  auto config = GetDefaultConfig();
  config.num_frames_to_encode = kNumFramesToEncodeForBitrateCheck * 2;
  auto encoder = CreateVideoEncoder(g_env->Video(), config);
  // Set a longer event timeout than the default (30 sec) because encoding
  // 2160p and validating the stream take a long time.
  encoder->SetEventWaitTimeout(kBitrateCheckEventTimeout);

  // Encode the video with the first bitrate.
  const uint32_t first_bitrate = config.bitrate_allocation.GetSumBps();
  encoder->EncodeUntil(VideoEncoder::kFrameReleased,
                       kNumFramesToEncodeForBitrateCheck);
  EXPECT_TRUE(encoder->WaitUntilIdle());
  EXPECT_NEAR(encoder->GetStats().Bitrate(), first_bitrate,
              kBitrateTolerance * first_bitrate);

  // Encode the video with the second bitrate.
  const uint32_t second_bitrate = first_bitrate * 3 / 2;
  encoder->ResetStats();
  encoder->UpdateBitrate(
      AllocateDefaultBitrateForTesting(
          config.num_spatial_layers, config.num_temporal_layers,
          Bitrate::ConstantBitrate(second_bitrate)),
      config.framerate);
  encoder->Encode();
  EXPECT_TRUE(encoder->WaitForFlushDone());
  EXPECT_NEAR(encoder->GetStats().Bitrate(), second_bitrate,
              kBitrateTolerance * second_bitrate);

  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
  EXPECT_EQ(encoder->GetFrameReleasedCount(), config.num_frames_to_encode);
  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}

TEST_F(VideoEncoderTest, BitrateCheck_DynamicFramerate) {
  if (g_env->SpatialLayers().size() > 1)
    GTEST_SKIP() << "Skip SHMEM input test cases in spatial SVC encoding";

  if (g_env->BitrateAllocation().GetMode() != Bitrate::Mode::kConstant) {
    GTEST_SKIP()
        << "Skip dynamic framerate change checks for non-CBR bitrate mode";
  }
  if (auto skip_reason = SupportsDynamicFramerate())
    GTEST_SKIP() << *skip_reason;
  auto config = GetDefaultConfig();
  config.num_frames_to_encode = kNumFramesToEncodeForBitrateCheck * 2;
  auto encoder = CreateVideoEncoder(g_env->Video(), config);
  // Set a longer event timeout than the default (30 sec) because encoding
  // 2160p and validating the stream take a long time.
  encoder->SetEventWaitTimeout(kBitrateCheckEventTimeout);

  // Encode the video with the first framerate.
  const uint32_t first_framerate = config.framerate;
  encoder->EncodeUntil(VideoEncoder::kFrameReleased,
                       kNumFramesToEncodeForBitrateCheck);
  EXPECT_TRUE(encoder->WaitUntilIdle());
  EXPECT_NEAR(encoder->GetStats().Bitrate(),
              config.bitrate_allocation.GetSumBps(),
              kBitrateTolerance * config.bitrate_allocation.GetSumBps());

  // Encode the video with the second framerate.
  const uint32_t second_framerate = first_framerate * 3 / 2;
  encoder->ResetStats();
  encoder->UpdateBitrate(config.bitrate_allocation, second_framerate);
  encoder->Encode();
  EXPECT_TRUE(encoder->WaitForFlushDone());
  EXPECT_NEAR(encoder->GetStats().Bitrate(),
              config.bitrate_allocation.GetSumBps(),
              kBitrateTolerance * config.bitrate_allocation.GetSumBps());

  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
  EXPECT_EQ(encoder->GetFrameReleasedCount(), config.num_frames_to_encode);
  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}

TEST_F(VideoEncoderTest, FlushAtEndOfStream_NV12Dmabuf) {
  if (auto skip_reason = SupportsNV12DmaBufInput())
    GTEST_SKIP() << *skip_reason;

  Video* nv12_video = g_env->GenerateNV12Video();
  VideoEncoderClientConfig config(nv12_video, g_env->Profile(),
                                  g_env->SpatialLayers(),
                                  g_env->BitrateAllocation(), g_env->Reverse());
  config.input_storage_type =
      VideoEncodeAccelerator::Config::StorageType::kGpuMemoryBuffer;

  auto encoder = CreateVideoEncoder(nv12_video, config);
  encoder->Encode();
  EXPECT_TRUE(encoder->WaitForFlushDone());
  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
  EXPECT_EQ(encoder->GetFrameReleasedCount(), nv12_video->NumFrames());
  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}

// Downscaling is required in VideoEncodeAccelerator when zero-copy video
// capture is enabled. One example is simulcast: the camera produces 360p
// VideoFrames and there are two VideoEncodeAccelerators, one for 360p and one
// for 180p. The VideoEncoder for 180p is fed 360p frames and thus has to
// perform the scaling from 360p to 180p.
TEST_F(VideoEncoderTest, FlushAtEndOfStream_NV12DmabufScaling) {
  if (auto skip_reason = SupportsNV12DmaBufInput())
    GTEST_SKIP() << *skip_reason;
  if (g_env->SpatialLayers().size() > 1)
    GTEST_SKIP() << "Skip simulcast test case for spatial SVC encoding";

  constexpr gfx::Size kMinOutputResolution(240, 180);
  const gfx::Size output_resolution =
      gfx::Size(g_env->Video()->Resolution().width() / 2,
                g_env->Video()->Resolution().height() / 2);
  if (!gfx::Rect(output_resolution)
           .Contains(gfx::Rect(kMinOutputResolution))) {
    GTEST_SKIP() << "Skip test if video resolution is too small, "
                 << "output_resolution=" << output_resolution.ToString()
                 << ", minimum output resolution="
                 << kMinOutputResolution.ToString();
  }

  auto* nv12_video = g_env->GenerateNV12Video();
  // Set 1/4 of the original bitrate because the area of |output_resolution| is
  // 1/4 of the original resolution.
  uint32_t new_target_bitrate = g_env->BitrateAllocation().GetSumBps() / 4;
  // TODO(b/181797390): Reconsider whether this peak bitrate is reasonable.
  const Bitrate new_bitrate =
      g_env->BitrateAllocation().GetMode() == Bitrate::Mode::kConstant
          ? Bitrate::ConstantBitrate(new_target_bitrate)
          : Bitrate::VariableBitrate(new_target_bitrate,
                                     new_target_bitrate * 2);
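
  // If a single spatial layer was specified, adjust its resolution and bitrate
  // to match the downscaled output before building the encoder config.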
  auto spatial_layers = g_env->SpatialLayers();
  size_t num_temporal_layers = 1u;
  if (!spatial_layers.empty()) {
    CHECK_EQ(spatial_layers.size(), 1u);
    spatial_layers[0].width = output_resolution.width();
    spatial_layers[0].height = output_resolution.height();
    spatial_layers[0].bitrate_bps /= 4;
    num_temporal_layers = spatial_layers[0].num_of_temporal_layers;
  }
  VideoEncoderClientConfig config(
      nv12_video, g_env->Profile(), spatial_layers,
      AllocateDefaultBitrateForTesting(/*num_spatial_layers=*/1u,
                                       num_temporal_layers, new_bitrate),
      g_env->Reverse());
  config.output_resolution = output_resolution;
  config.input_storage_type =
      VideoEncodeAccelerator::Config::StorageType::kGpuMemoryBuffer;

  auto encoder = CreateVideoEncoder(nv12_video, config);
  encoder->Encode();
  EXPECT_TRUE(encoder->WaitForFlushDone());
  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
  EXPECT_EQ(encoder->GetFrameReleasedCount(), nv12_video->NumFrames());
  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}

// Encode VideoFrames while cropping to the rectangle (0, 60, width, height).
// Cropping is required in VideoEncodeAccelerator when zero-copy video
// capture is enabled. One example is when 640x360 capture recording is
// requested but the camera cannot produce that resolution and instead
// produces 640x480 frames with visible_rect=(0, 60, 640x360).
TEST_F(VideoEncoderTest, FlushAtEndOfStream_NV12DmabufCroppingTopAndBottom) {
  if (auto skip_reason = SupportsNV12DmaBufInput())
    GTEST_SKIP() << *skip_reason;

  constexpr int kGrowHeight = 120;
  const gfx::Size original_resolution = g_env->Video()->Resolution();
  const gfx::Rect expanded_visible_rect(0, kGrowHeight / 2,
                                        original_resolution.width(),
                                        original_resolution.height());
  const gfx::Size expanded_resolution(
      original_resolution.width(), original_resolution.height() + kGrowHeight);
  constexpr gfx::Size kMaxExpandedResolution(1920, 1080);
  if (!gfx::Rect(kMaxExpandedResolution)
           .Contains(gfx::Rect(expanded_resolution))) {
    GTEST_SKIP() << "Expanded video resolution is too large, "
                 << "expanded_resolution=" << expanded_resolution.ToString()
                 << ", maximum expanded resolution="
                 << kMaxExpandedResolution.ToString();
  }

  auto nv12_expanded_video = g_env->GenerateNV12Video()->Expand(
      expanded_resolution, expanded_visible_rect);
  ASSERT_TRUE(nv12_expanded_video);
  VideoEncoderClientConfig config(nv12_expanded_video.get(), g_env->Profile(),
                                  g_env->SpatialLayers(),
                                  g_env->BitrateAllocation(), g_env->Reverse());
  config.output_resolution = original_resolution;
  config.input_storage_type =
      VideoEncodeAccelerator::Config::StorageType::kGpuMemoryBuffer;

  auto encoder = CreateVideoEncoder(nv12_expanded_video.get(), config);
  encoder->Encode();
  EXPECT_TRUE(encoder->WaitForFlushDone());
  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
  EXPECT_EQ(encoder->GetFrameReleasedCount(),
            nv12_expanded_video->NumFrames());
  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}

// Encode VideoFrames while cropping to the rectangle (60, 0, width, height).
// Cropping is required in VideoEncodeAccelerator when zero-copy video
// capture is enabled. One example is when 640x360 capture recording is
// requested but the camera cannot produce that resolution and instead
// produces 760x360 frames with visible_rect=(60, 0, 640x360).
TEST_F(VideoEncoderTest, FlushAtEndOfStream_NV12DmabufCroppingRightAndLeft) {
  if (auto skip_reason = SupportsNV12DmaBufInput())
    GTEST_SKIP() << *skip_reason;

  constexpr int kGrowWidth = 120;
  const gfx::Size original_resolution = g_env->Video()->Resolution();
  const gfx::Rect expanded_visible_rect(kGrowWidth / 2, 0,
                                        original_resolution.width(),
                                        original_resolution.height());
  const gfx::Size expanded_resolution(original_resolution.width() + kGrowWidth,
                                      original_resolution.height());
  constexpr gfx::Size kMaxExpandedResolution(1920, 1080);
  if (!gfx::Rect(kMaxExpandedResolution)
           .Contains(gfx::Rect(expanded_resolution))) {
    GTEST_SKIP() << "Expanded video resolution is too large, "
                 << "expanded_resolution=" << expanded_resolution.ToString()
                 << ", maximum expanded resolution="
                 << kMaxExpandedResolution.ToString();
  }

  auto nv12_expanded_video = g_env->GenerateNV12Video()->Expand(
      expanded_resolution, expanded_visible_rect);
  ASSERT_TRUE(nv12_expanded_video);
  VideoEncoderClientConfig config(nv12_expanded_video.get(), g_env->Profile(),
                                  g_env->SpatialLayers(),
                                  g_env->BitrateAllocation(), g_env->Reverse());
  config.output_resolution = original_resolution;
  config.input_storage_type =
      VideoEncodeAccelerator::Config::StorageType::kGpuMemoryBuffer;

  auto encoder = CreateVideoEncoder(nv12_expanded_video.get(), config);
  encoder->Encode();
  EXPECT_TRUE(encoder->WaitForFlushDone());
  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
  EXPECT_EQ(encoder->GetFrameReleasedCount(),
            nv12_expanded_video->NumFrames());
  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}

// This test deactivates and activates spatial layers during encoding.
TEST_F(VideoEncoderTest, DeactivateAndActivateSpatialLayers) {
  if (auto skip_reason = SupportsNV12DmaBufInput())
    GTEST_SKIP() << *skip_reason;
  const auto& spatial_layers = g_env->SpatialLayers();
  if (spatial_layers.size() <= 1)
    GTEST_SKIP() << "Skip (de)activate spatial layers test for simple encoding";

  Video* nv12_video = g_env->GenerateNV12Video();
  const size_t bottom_spatial_idx = 0;
  const size_t top_spatial_idx = spatial_layers.size() - 1;
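  // A spatial layer is deactivated by setting the bitrate of all of its
  // temporal layers to zero.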
  auto deactivate_spatial_layer =
      [](VideoBitrateAllocation bitrate_allocation,
         size_t deactivate_sid) -> VideoBitrateAllocation {
    for (size_t i = 0; i < VideoBitrateAllocation::kMaxTemporalLayers; ++i)
      bitrate_allocation.SetBitrate(deactivate_sid, i, 0u);
    return bitrate_allocation;
  };

  const auto& default_allocation = g_env->BitrateAllocation();
  std::vector<VideoBitrateAllocation> bitrate_allocations;
  // Deactivate the top layer.
  bitrate_allocations.emplace_back(
      deactivate_spatial_layer(default_allocation, top_spatial_idx));
  // Activate the top layer.
  bitrate_allocations.emplace_back(default_allocation);
  // Deactivate the bottom layer (and the top layer if there is still a spatial
  // layer in between).
  auto bitrate_allocation =
      deactivate_spatial_layer(default_allocation, bottom_spatial_idx);
  if (bottom_spatial_idx + 1 < top_spatial_idx) {
    bitrate_allocation =
        deactivate_spatial_layer(bitrate_allocation, top_spatial_idx);
  }
  bitrate_allocations.emplace_back(bitrate_allocation);
  // Deactivate all layers except the bottom layer.
  bitrate_allocation = default_allocation;
  for (size_t i = bottom_spatial_idx + 1; i < spatial_layers.size(); ++i)
    bitrate_allocation = deactivate_spatial_layer(bitrate_allocation, i);
  bitrate_allocations.emplace_back(bitrate_allocation);

  VideoEncoderClientConfig config(nv12_video, g_env->Profile(),
                                  g_env->SpatialLayers(),
                                  g_env->BitrateAllocation(), g_env->Reverse());
  config.input_storage_type =
      VideoEncodeAccelerator::Config::StorageType::kGpuMemoryBuffer;
  std::vector<size_t> num_frames_to_encode(bitrate_allocations.size());
  for (size_t i = 0; i < num_frames_to_encode.size(); ++i)
    num_frames_to_encode[i] = config.num_frames_to_encode * (i + 1);
  config.num_frames_to_encode =
      num_frames_to_encode.back() + config.num_frames_to_encode;

  auto encoder = CreateVideoEncoder(nv12_video, config);
  for (size_t i = 0; i < bitrate_allocations.size(); ++i) {
    encoder->EncodeUntil(VideoEncoder::kFrameReleased, num_frames_to_encode[i]);
    EXPECT_TRUE(encoder->WaitUntilIdle());
    encoder->UpdateBitrate(bitrate_allocations[i], config.framerate);
  }
  encoder->Encode();
  EXPECT_TRUE(encoder->WaitForFlushDone());
  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
  EXPECT_EQ(encoder->GetFrameReleasedCount(), config.num_frames_to_encode);
  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}

}  // namespace test
}  // namespace media

int main(int argc, char** argv) {
  // Set the default test data path.
  media::test::Video::SetTestDataPath(media::GetTestDataPath());

  // Print the help message if requested. This needs to be done before
  // initializing gtest, to overwrite the default gtest help message.
  base::CommandLine::Init(argc, argv);
  const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
  LOG_ASSERT(cmd_line);
  if (cmd_line->HasSwitch("help")) {
    std::cout << media::test::usage_msg << "\n" << media::test::help_msg;
    return 0;
  }

  // Check if a video was specified on the command line.
  base::CommandLine::StringVector args = cmd_line->GetArgs();
  base::FilePath video_path =
      (args.size() >= 1) ? base::FilePath(args[0])
                         : base::FilePath(media::test::kDefaultTestVideoPath);
  base::FilePath video_metadata_path =
      (args.size() >= 2) ? base::FilePath(args[1]) : base::FilePath();

  std::string codec = "h264";
  size_t num_temporal_layers = 1u;
  size_t num_spatial_layers = 1u;
  bool output_bitstream = false;
  bool reverse = false;
  media::Bitrate::Mode bitrate_mode = media::Bitrate::Mode::kConstant;
  media::test::FrameOutputConfig frame_output_config;
  base::FilePath output_folder =
      base::FilePath(base::FilePath::kCurrentDirectory);
  std::vector<base::Feature> disabled_features;

  // Parse command line arguments.
  bool enable_bitstream_validator = true;
  base::CommandLine::SwitchMap switches = cmd_line->GetSwitches();
  for (base::CommandLine::SwitchMap::const_iterator it = switches.begin();
       it != switches.end(); ++it) {
    if (it->first.find("gtest_") == 0 ||               // Handled by GoogleTest
        it->first == "v" || it->first == "vmodule") {  // Handled by Chrome
      continue;
    }

    if (it->first == "codec") {
      codec = it->second;
    } else if (it->first == "num_temporal_layers") {
      if (!base::StringToSizeT(it->second, &num_temporal_layers)) {
        std::cout << "invalid number of temporal layers: " << it->second
                  << "\n";
        return EXIT_FAILURE;
      }
    } else if (it->first == "num_spatial_layers") {
      if (!base::StringToSizeT(it->second, &num_spatial_layers)) {
        std::cout << "invalid number of spatial layers: " << it->second << "\n";
        return EXIT_FAILURE;
      }
    } else if (it->first == "bitrate_mode") {
      if (it->second == "vbr") {
        bitrate_mode = media::Bitrate::Mode::kVariable;
      } else if (it->second != "cbr") {
        std::cout << "unknown bitrate mode \"" << it->second
                  << "\", possible values are \"cbr|vbr\"\n";
        return EXIT_FAILURE;
      }
    } else if (it->first == "disable_validator") {
      enable_bitstream_validator = false;
    } else if (it->first == "output_bitstream") {
      output_bitstream = true;
    } else if (it->first == "reverse") {
      reverse = true;
    } else if (it->first == "output_images") {
      if (it->second == "all") {
        frame_output_config.output_mode = media::test::FrameOutputMode::kAll;
      } else if (it->second == "corrupt") {
        frame_output_config.output_mode =
            media::test::FrameOutputMode::kCorrupt;
      } else {
        std::cout << "unknown image output mode \"" << it->second
                  << "\", possible values are \"all|corrupt\"\n";
        return EXIT_FAILURE;
      }
    } else if (it->first == "output_format") {
      if (it->second == "png") {
        frame_output_config.output_format =
            media::test::VideoFrameFileWriter::OutputFormat::kPNG;
      } else if (it->second == "yuv") {
        frame_output_config.output_format =
            media::test::VideoFrameFileWriter::OutputFormat::kYUV;
      } else {
        std::cout << "unknown frame output format \"" << it->second
                  << "\", possible values are \"png|yuv\"\n";
        return EXIT_FAILURE;
      }
    } else if (it->first == "output_limit") {
      if (!base::StringToUint64(it->second,
                                &frame_output_config.output_limit)) {
        std::cout << "invalid number \"" << it->second << "\"\n";
        return EXIT_FAILURE;
      }
    } else if (it->first == "output_folder") {
      output_folder = base::FilePath(it->second);
    } else if (it->first == "disable_vaapi_lock") {
      disabled_features.push_back(media::kGlobalVaapiLock);
    } else {
      std::cout << "unknown option: --" << it->first << "\n"
                << media::test::usage_msg;
      return EXIT_FAILURE;
    }
  }

  testing::InitGoogleTest(&argc, argv);

  // Set up our test environment.
  media::test::VideoEncoderTestEnvironment* test_environment =
      media::test::VideoEncoderTestEnvironment::Create(
          video_path, video_metadata_path, enable_bitstream_validator,
          output_folder, codec, num_temporal_layers, num_spatial_layers,
          output_bitstream,
          /*output_bitrate=*/absl::nullopt, bitrate_mode, reverse,
          frame_output_config,
          /*enabled_features=*/{}, disabled_features);
  if (!test_environment)
    return EXIT_FAILURE;

  media::test::g_env = static_cast<media::test::VideoEncoderTestEnvironment*>(
      testing::AddGlobalTestEnvironment(test_environment));

  return RUN_ALL_TESTS();
}