// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/cast/test/fake_media_source.h"

#include <memory>
#include <utility>

#include "base/bind.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "build/build_config.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_fifo.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/media.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
#include "media/cast/cast_sender.h"
#include "media/cast/test/utility/audio_utility.h"
#include "media/cast/test/utility/video_utility.h"
#include "ui/gfx/geometry/size.h"

#if BUILDFLAG(IS_WIN)
#include <direct.h>
#endif  // BUILDFLAG(IS_WIN)

#include "media/ffmpeg/ffmpeg_common.h"
#include "media/ffmpeg/ffmpeg_decoding_loop.h"
#include "media/ffmpeg/ffmpeg_deleters.h"
#include "media/filters/ffmpeg_glue.h"
#include "media/filters/in_memory_url_protocol.h"

namespace {

static const int kSoundFrequency = 440;  // Frequency of sinusoid wave.
static const float kSoundVolume = 0.10f;
static const int kAudioFrameMs = 10;  // Each audio frame is exactly 10ms.
static const int kAudioPacketsPerSecond = 1000 / kAudioFrameMs;

// Bounds for variable frame size mode.
static const int kMinFakeFrameWidth = 60;
static const int kMinFakeFrameHeight = 34;
static const int kStartingFakeFrameWidth = 854;
static const int kStartingFakeFrameHeight = 480;
static const int kMaxFakeFrameWidth = 1280;
static const int kMaxFakeFrameHeight = 720;

static const int kMaxFrameSizeChangeMillis = 5000;

void AVFreeFrame(AVFrame* frame) {
  av_frame_free(&frame);
}
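
// Convert between an FFmpeg presentation timestamp (expressed in units of the
// stream's |time_base|) and a base::TimeDelta. TimeDeltaToPts() rounds to the
// nearest integer tick.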
base::TimeDelta PtsToTimeDelta(int64_t pts, const AVRational& time_base) {
  return pts * base::Seconds(1) * time_base.num / time_base.den;
}

int64_t TimeDeltaToPts(base::TimeDelta delta, const AVRational& time_base) {
  return static_cast<int64_t>(
      delta.InSecondsF() * time_base.den / time_base.num + 0.5 /* rounding */);
}

}  // namespace

namespace media {
namespace cast {

FakeMediaSource::FakeMediaSource(
    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
    const base::TickClock* clock,
    const FrameSenderConfig& audio_config,
    const FrameSenderConfig& video_config,
    bool keep_frames)
    : task_runner_(task_runner),
      output_audio_params_(AudioParameters::AUDIO_PCM_LINEAR,
                           media::GuessChannelLayout(audio_config.channels),
                           audio_config.rtp_timebase,
                           audio_config.rtp_timebase / kAudioPacketsPerSecond),
      video_config_(video_config),
      keep_frames_(keep_frames),
      variable_frame_size_mode_(false),
      synthetic_count_(0),
      clock_(clock),
      audio_frame_count_(0),
      video_frame_count_(0),
      av_format_context_(nullptr),
      audio_stream_index_(-1),
      playback_rate_(1.0),
      video_stream_index_(-1),
      video_frame_rate_numerator_(video_config.max_frame_rate),
      video_frame_rate_denominator_(1),
      audio_algo_(&media_log_),
      video_first_pts_(0),
      video_first_pts_set_(false) {
  CHECK(output_audio_params_.IsValid());
  audio_bus_factory_ = std::make_unique<TestAudioBusFactory>(
      audio_config.channels, audio_config.rtp_timebase, kSoundFrequency,
      kSoundVolume);
}

FakeMediaSource::~FakeMediaSource() = default;
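
// Loads the media file into memory, opens FFmpeg decoders for the audio and
// video streams it finds, and derives the playback rate and frame rate. When
// |final_fps| is greater than zero the source frame rate is overridden and
// the audio playback rate is scaled to match.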
void FakeMediaSource::SetSourceFile(const base::FilePath& video_file,
                                    int final_fps) {
  DCHECK(!video_file.empty());

  LOG(INFO) << "Source: " << video_file.value();
  if (!file_data_.Initialize(video_file)) {
    LOG(ERROR) << "Cannot load file.";
    return;
  }
  protocol_ = std::make_unique<InMemoryUrlProtocol>(file_data_.data(),
                                                    file_data_.length(), false);
  glue_ = std::make_unique<FFmpegGlue>(protocol_.get());

  if (!glue_->OpenContext()) {
    LOG(ERROR) << "Cannot open file.";
    return;
  }

  // AVFormatContext is owned by the glue.
  av_format_context_ = glue_->format_context();
  if (avformat_find_stream_info(av_format_context_, NULL) < 0) {
    LOG(ERROR) << "Cannot find stream information.";
    return;
  }

  // Prepare FFmpeg decoders.
  for (unsigned int i = 0; i < av_format_context_->nb_streams; ++i) {
    AVStream* av_stream = av_format_context_->streams[i];
    std::unique_ptr<AVCodecContext, ScopedPtrAVFreeContext> av_codec_context(
        AVStreamToAVCodecContext(av_stream));
    if (!av_codec_context) {
      LOG(ERROR) << "Cannot get a codec context for the codec: "
                 << av_stream->codecpar->codec_id;
      continue;
    }

    const AVCodec* av_codec = avcodec_find_decoder(av_codec_context->codec_id);
    if (!av_codec) {
      LOG(ERROR) << "Cannot find decoder for the codec: "
                 << av_codec_context->codec_id;
      continue;
    }

    // Number of threads for decoding.
    av_codec_context->thread_count = 2;
    av_codec_context->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
    av_codec_context->request_sample_fmt = AV_SAMPLE_FMT_S16;

    if (avcodec_open2(av_codec_context.get(), av_codec, nullptr) < 0) {
      LOG(ERROR) << "Cannot open AVCodecContext for the codec: "
                 << av_codec_context->codec_id;
      return;
    }

    if (av_codec->type == AVMEDIA_TYPE_AUDIO) {
      if (av_codec_context->sample_fmt == AV_SAMPLE_FMT_S16P) {
        LOG(ERROR) << "Audio format not supported.";
        continue;
      }
      ChannelLayout layout = ChannelLayoutToChromeChannelLayout(
          av_codec_context->channel_layout, av_codec_context->channels);
      if (layout == CHANNEL_LAYOUT_UNSUPPORTED) {
        LOG(ERROR) << "Unsupported audio channels layout.";
        continue;
      }
      if (audio_stream_index_ != -1) {
        LOG(WARNING) << "Found multiple audio streams.";
      }
      audio_stream_index_ = static_cast<int>(i);
      av_audio_context_ = std::move(av_codec_context);
      source_audio_params_.Reset(
          AudioParameters::AUDIO_PCM_LINEAR, layout,
          av_audio_context_->sample_rate,
          av_audio_context_->sample_rate / kAudioPacketsPerSecond);
      source_audio_params_.set_channels_for_discrete(
          av_audio_context_->channels);
      CHECK(source_audio_params_.IsValid());
      LOG(INFO) << "Source file has audio.";
      audio_decoding_loop_ =
          std::make_unique<FFmpegDecodingLoop>(av_audio_context_.get());
    } else if (av_codec->type == AVMEDIA_TYPE_VIDEO) {
      VideoPixelFormat format =
          AVPixelFormatToVideoPixelFormat(av_codec_context->pix_fmt);
      if (format != PIXEL_FORMAT_I420) {
        LOG(ERROR) << "Cannot handle non YV12 video format: " << format;
        continue;
      }
      if (video_stream_index_ != -1) {
        LOG(WARNING) << "Found multiple video streams.";
      }
      video_stream_index_ = static_cast<int>(i);
      av_video_context_ = std::move(av_codec_context);
      video_decoding_loop_ =
          std::make_unique<FFmpegDecodingLoop>(av_video_context_.get());
      if (final_fps > 0) {
        // If video is played at a manual speed audio needs to match.
        playback_rate_ = 1.0 * final_fps * av_stream->r_frame_rate.den /
                         av_stream->r_frame_rate.num;
        video_frame_rate_numerator_ = final_fps;
        video_frame_rate_denominator_ = 1;
      } else {
        playback_rate_ = 1.0;
        video_frame_rate_numerator_ = av_stream->r_frame_rate.num;
        video_frame_rate_denominator_ = av_stream->r_frame_rate.den;
      }
      LOG(INFO) << "Source file has video.";
    } else {
      LOG(ERROR) << "Unknown stream type; ignore.";
    }
  }

  Rewind();
}

void FakeMediaSource::SetVariableFrameSizeMode(bool enabled) {
  variable_frame_size_mode_ = enabled;
}
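
// Starts the send loop. If no source file was loaded (neither audio nor video
// is being transcoded), synthetic tones and test-pattern frames are generated
// instead; otherwise the audio scaling/resampling pipeline is set up and
// transcoded frames are sent.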
void FakeMediaSource::Start(scoped_refptr<AudioFrameInput> audio_frame_input,
                            scoped_refptr<VideoFrameInput> video_frame_input) {
  audio_frame_input_ = audio_frame_input;
  video_frame_input_ = video_frame_input;

  LOG(INFO) << "Max Frame rate: " << video_config_.max_frame_rate;
  LOG(INFO) << "Source Frame rate: " << video_frame_rate_numerator_ << "/"
            << video_frame_rate_denominator_ << " fps.";
  LOG(INFO) << "Audio playback rate: " << playback_rate_;

  if (start_time_.is_null())
    start_time_ = clock_->NowTicks();

  if (!is_transcoding_audio() && !is_transcoding_video()) {
    // Send fake patterns.
    task_runner_->PostTask(FROM_HERE,
                           base::BindOnce(&FakeMediaSource::SendNextFakeFrame,
                                          weak_factory_.GetWeakPtr()));
    return;
  }

  // Send transcoding streams.
  bool is_encrypted = false;
  audio_algo_.Initialize(source_audio_params_, is_encrypted);
  audio_algo_.FlushBuffers();
  audio_fifo_input_bus_ = AudioBus::Create(
      source_audio_params_.channels(),
      source_audio_params_.frames_per_buffer());
  // Audio FIFO can carry all data from the AudioRendererAlgorithm.
  audio_fifo_ = std::make_unique<AudioFifo>(source_audio_params_.channels(),
                                            audio_algo_.QueueCapacity());
  audio_converter_ = std::make_unique<media::AudioConverter>(
      source_audio_params_, output_audio_params_, true);
  audio_converter_->AddInput(this);
  task_runner_->PostTask(FROM_HERE,
                         base::BindOnce(&FakeMediaSource::SendNextFrame,
                                        weak_factory_.GetWeakPtr()));
}
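
// Creates one black frame, populates it with a synthetic pattern, and inserts
// it, then sends enough 10 ms audio buses to cover the next video frame's
// timestamp. If generation falls behind real time, frames are skipped before
// the next run is scheduled.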
void FakeMediaSource::SendNextFakeFrame() {
  UpdateNextFrameSize();
  scoped_refptr<VideoFrame> video_frame =
      VideoFrame::CreateBlackFrame(current_frame_size_);
  PopulateVideoFrame(video_frame.get(), synthetic_count_);
  ++synthetic_count_;

  const base::TimeTicks now = clock_->NowTicks();

  base::TimeDelta video_time = VideoFrameTime(++video_frame_count_);
  video_frame->set_timestamp(video_time);
  if (keep_frames_)
    inserted_video_frame_queue_.push(video_frame);
  video_frame_input_->InsertRawVideoFrame(video_frame,
                                          start_time_ + video_time);

  // Send just enough audio data to match next video frame's time.
  base::TimeDelta audio_time = AudioFrameTime(audio_frame_count_);
  while (audio_time < video_time) {
    if (is_transcoding_audio()) {
      Decode(true);
      CHECK(!audio_bus_queue_.empty()) << "No audio decoded.";
      std::unique_ptr<AudioBus> bus(audio_bus_queue_.front());
      audio_bus_queue_.pop();
      audio_frame_input_->InsertAudio(std::move(bus), start_time_ + audio_time);
    } else {
      audio_frame_input_->InsertAudio(
          audio_bus_factory_->NextAudioBus(base::Milliseconds(kAudioFrameMs)),
          start_time_ + audio_time);
    }
    audio_time = AudioFrameTime(++audio_frame_count_);
  }

  // This is the time since FakeMediaSource was started.
  const base::TimeDelta elapsed_time = now - start_time_;

  // Handle the case when frame generation cannot keep up.
  // Move the time ahead to match the next frame.
  while (video_time < elapsed_time) {
    LOG(WARNING) << "Skipping one frame.";
    video_time = VideoFrameTime(++video_frame_count_);
  }

  task_runner_->PostDelayedTask(
      FROM_HERE,
      base::BindOnce(&FakeMediaSource::SendNextFakeFrame,
                     weak_factory_.GetWeakPtr()),
      video_time - elapsed_time);
}
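
// In variable frame size mode, picks a new random frame size at random
// intervals (up to kMaxFrameSizeChangeMillis apart); otherwise keeps the
// fixed starting size.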
void FakeMediaSource::UpdateNextFrameSize() {
  if (variable_frame_size_mode_) {
    bool update_size_change_time = false;
    if (current_frame_size_.IsEmpty()) {
      current_frame_size_ = gfx::Size(kStartingFakeFrameWidth,
                                      kStartingFakeFrameHeight);
      update_size_change_time = true;
    } else if (clock_->NowTicks() >= next_frame_size_change_time_) {
      current_frame_size_ = gfx::Size(
          base::RandInt(kMinFakeFrameWidth, kMaxFakeFrameWidth),
          base::RandInt(kMinFakeFrameHeight, kMaxFakeFrameHeight));
      update_size_change_time = true;
    }

    if (update_size_change_time) {
      next_frame_size_change_time_ =
          clock_->NowTicks() +
          base::Milliseconds(base::RandDouble() * kMaxFrameSizeChangeMillis);
    }
  } else {
    current_frame_size_ = gfx::Size(kStartingFakeFrameWidth,
                                    kStartingFakeFrameHeight);
    next_frame_size_change_time_ = base::TimeTicks();
  }
}
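
// The two helpers below pop one decoded video frame (or audio bus) whose
// timestamp is due and insert it into the corresponding frame input. They
// return false when the stream is not being transcoded, the queue is empty,
// or the next item is not yet due.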
bool FakeMediaSource::SendNextTranscodedVideo(base::TimeDelta elapsed_time) {
  if (!is_transcoding_video())
    return false;

  Decode(false);
  if (video_frame_queue_.empty())
    return false;

  const scoped_refptr<VideoFrame> video_frame = video_frame_queue_.front();
  if (elapsed_time < video_frame->timestamp())
    return false;
  video_frame_queue_.pop();

  // Use the timestamp from the file if we're transcoding.
  video_frame->set_timestamp(ScaleTimestamp(video_frame->timestamp()));
  if (keep_frames_)
    inserted_video_frame_queue_.push(video_frame);
  video_frame_input_->InsertRawVideoFrame(
      video_frame, start_time_ + video_frame->timestamp());

  // Make sure queue is not empty.
  Decode(false);
  return true;
}

bool FakeMediaSource::SendNextTranscodedAudio(base::TimeDelta elapsed_time) {
  if (!is_transcoding_audio())
    return false;

  Decode(true);
  if (audio_bus_queue_.empty())
    return false;

  base::TimeDelta audio_time = audio_sent_ts_->GetTimestamp();
  if (elapsed_time < audio_time)
    return false;
  std::unique_ptr<AudioBus> bus(audio_bus_queue_.front());
  audio_bus_queue_.pop();
  audio_sent_ts_->AddFrames(bus->frames());
  audio_frame_input_->InsertAudio(std::move(bus), start_time_ + audio_time);

  // Make sure queue is not empty.
  Decode(true);
  return true;
}
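
// Drives transcoded playback: sends all audio that is due against the wall
// clock, then sends video synced to the audio timestamps, rewinds at end of
// stream, and reschedules itself every audio frame interval.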
void FakeMediaSource::SendNextFrame() {
  // Send as much as possible. Audio is sent according to system time.
  while (SendNextTranscodedAudio(clock_->NowTicks() - start_time_)) {
  }

  // Video is sync'ed to audio.
  while (SendNextTranscodedVideo(audio_sent_ts_->GetTimestamp())) {
  }

  if (audio_bus_queue_.empty() && video_frame_queue_.empty()) {
    // Both queues being empty can only mean that we have reached the end of
    // the stream.
    LOG(INFO) << "Rewind.";
    Rewind();
  }

  // Schedule the next send.
  task_runner_->PostDelayedTask(FROM_HERE,
                                base::BindOnce(&FakeMediaSource::SendNextFrame,
                                               weak_factory_.GetWeakPtr()),
                                base::Milliseconds(kAudioFrameMs));
}

base::TimeDelta FakeMediaSource::VideoFrameTime(int frame_number) {
  return frame_number * base::Seconds(1) * video_frame_rate_denominator_ /
         video_frame_rate_numerator_;
}

base::TimeDelta FakeMediaSource::ScaleTimestamp(base::TimeDelta timestamp) {
  return timestamp / playback_rate_;
}

base::TimeDelta FakeMediaSource::AudioFrameTime(int frame_number) {
  return frame_number * base::Milliseconds(kAudioFrameMs);
}

void FakeMediaSource::Rewind() {
  CHECK(av_seek_frame(av_format_context_, -1, 0, AVSEEK_FLAG_BACKWARD) >= 0)
      << "Failed to rewind to the beginning.";
}

ScopedAVPacket FakeMediaSource::DemuxOnePacket(bool* audio) {
  auto packet = ScopedAVPacket::Allocate();
  if (av_read_frame(av_format_context_, packet.get()) < 0) {
    VLOG(1) << "Failed to read one AVPacket.";
    return {};
  }

  int stream_index = static_cast<int>(packet->stream_index);
  if (stream_index == audio_stream_index_) {
    *audio = true;
  } else if (stream_index == video_stream_index_) {
    *audio = false;
  } else {
    // Ignore unknown packet.
    LOG(INFO) << "Unknown packet.";
    return {};
  }
  return packet;
}
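
// Decodes one audio packet, then drains the AudioRendererAlgorithm: scaled
// audio is pushed into the FIFO and, once enough data has accumulated,
// resampled into 10 ms output buses that are queued for sending.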
void FakeMediaSource::DecodeAudio(ScopedAVPacket packet) {
  auto result = audio_decoding_loop_->DecodePacket(
      packet.get(), base::BindRepeating(&FakeMediaSource::OnNewAudioFrame,
                                        base::Unretained(this)));
  CHECK_EQ(result, FFmpegDecodingLoop::DecodeStatus::kOkay)
      << "Failed to decode audio.";

  const int frames_needed_to_scale =
      playback_rate_ * av_audio_context_->sample_rate / kAudioPacketsPerSecond;
  while (frames_needed_to_scale <= audio_algo_.BufferedFrames()) {
    if (!audio_algo_.FillBuffer(audio_fifo_input_bus_.get(), 0,
                                audio_fifo_input_bus_->frames(),
                                playback_rate_)) {
      // Nothing can be scaled. Decode some more.
      return;
    }

    // Prevent overflow of audio data in the FIFO.
    if (audio_fifo_input_bus_->frames() + audio_fifo_->frames() <=
        audio_fifo_->max_frames()) {
      audio_fifo_->Push(audio_fifo_input_bus_.get());
    } else {
      LOG(WARNING) << "Audio FIFO full; dropping samples.";
    }

    // Make sure there's enough data to resample audio.
    if (audio_fifo_->frames() <
        2 * source_audio_params_.sample_rate() / kAudioPacketsPerSecond) {
      continue;
    }

    std::unique_ptr<media::AudioBus> resampled_bus(media::AudioBus::Create(
        output_audio_params_.channels(),
        output_audio_params_.sample_rate() / kAudioPacketsPerSecond));
    audio_converter_->Convert(resampled_bus.get());
    audio_bus_queue_.push(resampled_bus.release());
  }
}
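
// Called by the decoding loop for each decoded audio AVFrame. Lazily creates
// the output timestamp helper (at the sender's sample rate) and enqueues the
// samples into the AudioRendererAlgorithm.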
bool FakeMediaSource::OnNewAudioFrame(AVFrame* frame) {
  int frames_read = frame->nb_samples;
  if (frames_read < 0)
    return false;

  if (!audio_sent_ts_) {
    // Initialize the base time to the first packet in the file. The timestamp
    // helper uses the frequency we send to the receiver, not the frequency of
    // the source file, because we increment the frame count by the number of
    // samples sent.
    audio_sent_ts_ = std::make_unique<AudioTimestampHelper>(
        output_audio_params_.sample_rate());
    // For some files this is an invalid value.
    base::TimeDelta base_ts;
    audio_sent_ts_->SetBaseTimestamp(base_ts);
  }

  scoped_refptr<AudioBuffer> buffer = AudioBuffer::CopyFrom(
      AVSampleFormatToSampleFormat(av_audio_context_->sample_fmt,
                                   av_audio_context_->codec_id),
      ChannelLayoutToChromeChannelLayout(av_audio_context_->channel_layout,
                                         av_audio_context_->channels),
      av_audio_context_->channels, av_audio_context_->sample_rate, frames_read,
      &frame->data[0],
      PtsToTimeDelta(frame->pts, av_audio_stream()->time_base));
  audio_algo_.EnqueueBuffer(buffer);
  return true;
}

void FakeMediaSource::DecodeVideo(ScopedAVPacket packet) {
  auto result = video_decoding_loop_->DecodePacket(
      packet.get(), base::BindRepeating(&FakeMediaSource::OnNewVideoFrame,
                                        base::Unretained(this)));
  CHECK_EQ(result, FFmpegDecodingLoop::DecodeStatus::kOkay)
      << "Failed to decode video.";
}
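
// Called by the decoding loop for each decoded video AVFrame. Wraps the
// frame's YUV planes in a media::VideoFrame without copying, rebases
// |video_first_pts_| when the stream loops back to the beginning, and queues
// the result.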
bool FakeMediaSource::OnNewVideoFrame(AVFrame* frame) {
  gfx::Size size(av_video_context_->width, av_video_context_->height);

  if (!video_first_pts_set_) {
    video_first_pts_ = frame->pts;
    video_first_pts_set_ = true;
  }
  const AVRational& time_base = av_video_stream()->time_base;
  base::TimeDelta timestamp =
      PtsToTimeDelta(frame->pts - video_first_pts_, time_base);
  if (timestamp < last_video_frame_timestamp_) {
    // Stream has rewound. Rebase |video_first_pts_|.
    const AVRational& frame_rate = av_video_stream()->r_frame_rate;
    timestamp = last_video_frame_timestamp_ +
                (base::Seconds(1) * frame_rate.den / frame_rate.num);
    const int64_t adjustment_pts = TimeDeltaToPts(timestamp, time_base);
    video_first_pts_ = frame->pts - adjustment_pts;
  }

  AVFrame* shallow_copy = av_frame_clone(frame);
  scoped_refptr<media::VideoFrame> video_frame =
      VideoFrame::WrapExternalYuvData(
          media::PIXEL_FORMAT_I420, size, gfx::Rect(size), size,
          shallow_copy->linesize[0], shallow_copy->linesize[1],
          shallow_copy->linesize[2], shallow_copy->data[0],
          shallow_copy->data[1], shallow_copy->data[2], timestamp);
  if (!video_frame)
    return false;
  video_frame_queue_.push(video_frame);
  video_frame_queue_.back()->AddDestructionObserver(
      base::BindOnce(&AVFreeFrame, shallow_copy));
  last_video_frame_timestamp_ = timestamp;
  return true;
}

void FakeMediaSource::Decode(bool decode_audio) {
  // Read the stream until one frame of the requested type (audio bus or video
  // frame) can be decoded.
  while (true) {
    if (decode_audio && !audio_bus_queue_.empty())
      return;
    if (!decode_audio && !video_frame_queue_.empty())
      return;

    bool audio_packet = false;
    ScopedAVPacket packet = DemuxOnePacket(&audio_packet);
    if (!packet) {
      VLOG(1) << "End of stream.";
      return;
    }

    if (audio_packet)
      DecodeAudio(std::move(packet));
    else
      DecodeVideo(std::move(packet));
  }
}
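
// media::AudioConverter::InputCallback implementation: feeds the converter
// from the FIFO, or returns silence (volume 0.0) when there is not enough
// buffered audio.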
double FakeMediaSource::ProvideInput(media::AudioBus* output_bus,
                                     uint32_t frames_delayed) {
  if (audio_fifo_->frames() >= output_bus->frames()) {
    audio_fifo_->Consume(output_bus, 0, output_bus->frames());
    return 1.0;
  } else {
    LOG(WARNING) << "Not enough audio data for resampling.";
    output_bus->Zero();
    return 0.0;
  }
}

scoped_refptr<media::VideoFrame>
FakeMediaSource::PopOldestInsertedVideoFrame() {
  CHECK(!inserted_video_frame_queue_.empty());
  scoped_refptr<media::VideoFrame> video_frame =
      inserted_video_frame_queue_.front();
  inserted_video_frame_queue_.pop();
  return video_frame;
}

AVStream* FakeMediaSource::av_audio_stream() {
  return av_format_context_->streams[audio_stream_index_];
}

AVStream* FakeMediaSource::av_video_stream() {
  return av_format_context_->streams[video_stream_index_];
}

}  // namespace cast
}  // namespace media