stream_parser_unittest.cc

// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <stddef.h>
#include <stdint.h>

#include <algorithm>
#include <map>
#include <sstream>

#include "media/base/stream_parser.h"
#include "media/base/stream_parser_buffer.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace media {

typedef StreamParser::TrackId TrackId;
typedef StreamParser::BufferQueue BufferQueue;

const int kEnd = -1;
const uint8_t kFakeData[] = {0xFF};
const TrackId kAudioTrackId = 0;
const TrackId kVideoTrackId = 1;
const TrackId kTextTrackIdA = 2;
const TrackId kTextTrackIdB = 3;

static bool IsAudio(scoped_refptr<StreamParserBuffer> buffer) {
  return buffer->type() == DemuxerStream::AUDIO;
}

static bool IsVideo(scoped_refptr<StreamParserBuffer> buffer) {
  return buffer->type() == DemuxerStream::VIDEO;
}

static bool IsText(scoped_refptr<StreamParserBuffer> buffer) {
  return buffer->type() == DemuxerStream::TEXT;
}

// Creates and appends a sequence of StreamParserBuffers to the provided
// |queue|. |decode_timestamps| determines the number of appended buffers and
// their sequence of decode timestamps; a |kEnd| timestamp indicates the
// end of the sequence and no buffer is appended for it. Each new buffer's
// type will be |type| with track ID set to |track_id|.
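// For example, passing {100, 103, 104, kEnd} as |decode_timestamps| appends
// three buffers with decode timestamps of 100, 103, and 104 microseconds, in
// that order.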
static void GenerateBuffers(const int* decode_timestamps,
                            StreamParserBuffer::Type type,
                            TrackId track_id,
                            BufferQueue* queue) {
  DCHECK(decode_timestamps);
  DCHECK(queue);
  DCHECK_NE(type, DemuxerStream::UNKNOWN);
  DCHECK_LE(type, DemuxerStream::TYPE_MAX);
  for (int i = 0; decode_timestamps[i] != kEnd; ++i) {
    // Each fake buffer copies kFakeData and is flagged as a keyframe.
    scoped_refptr<StreamParserBuffer> buffer =
        StreamParserBuffer::CopyFrom(kFakeData, sizeof(kFakeData),
                                     true, type, track_id);
    buffer->SetDecodeTimestamp(
        DecodeTimestamp::FromMicroseconds(decode_timestamps[i]));
    queue->push_back(buffer);
  }
}

class StreamParserTest : public testing::Test {
 public:
  StreamParserTest(const StreamParserTest&) = delete;
  StreamParserTest& operator=(const StreamParserTest&) = delete;

 protected:
  StreamParserTest() = default;

  // Returns the number of buffers in |merged_buffers_| for which |predicate|
  // returns true.
  size_t CountMatchingMergedBuffers(
      bool (*predicate)(scoped_refptr<StreamParserBuffer> buffer)) {
    return static_cast<size_t>(std::count_if(
        merged_buffers_.begin(), merged_buffers_.end(), predicate));
  }

  // Appends test audio buffers in the sequence described by
  // |decode_timestamps| to the audio track's BufferQueue in
  // |buffer_queue_map_|. See GenerateBuffers() for the |decode_timestamps|
  // format.
  void GenerateAudioBuffers(const int* decode_timestamps) {
    GenerateBuffers(decode_timestamps, DemuxerStream::AUDIO, kAudioTrackId,
                    &buffer_queue_map_[kAudioTrackId]);
  }

  // Appends test video buffers in the sequence described by
  // |decode_timestamps| to the video track's BufferQueue in
  // |buffer_queue_map_|. See GenerateBuffers() for the |decode_timestamps|
  // format.
  void GenerateVideoBuffers(const int* decode_timestamps) {
    GenerateBuffers(decode_timestamps, DemuxerStream::VIDEO, kVideoTrackId,
                    &buffer_queue_map_[kVideoTrackId]);
  }

  // Current tests only need up to two distinct text BufferQueues. This helper
  // conditionally appends buffers to the underlying |buffer_queue_map_|, keyed
  // by the respective track ID. If |decode_timestamps_a| or
  // |decode_timestamps_b| is NULL, the corresponding BufferQueue is left
  // unchanged. Note that repeated calls append to any existing queue for a
  // track rather than replacing it.
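  // For example, GenerateTextBuffers(text_timestamps, NULL) appends buffers
  // only to the BufferQueue for kTextTrackIdA.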
  void GenerateTextBuffers(const int* decode_timestamps_a,
                           const int* decode_timestamps_b) {
    if (decode_timestamps_a) {
      GenerateBuffers(decode_timestamps_a, DemuxerStream::TEXT, kTextTrackIdA,
                      &buffer_queue_map_[kTextTrackIdA]);
    }
    if (decode_timestamps_b) {
      GenerateBuffers(decode_timestamps_b, DemuxerStream::TEXT, kTextTrackIdB,
                      &buffer_queue_map_[kTextTrackIdB]);
    }
  }

  // Returns a string that describes the sequence of buffers in
  // |merged_buffers_|. The string is a concatenation of space-delimited buffer
  // descriptors in the same sequence as |merged_buffers_|. Each descriptor is
  // the concatenation of
  // 1) a single character that describes the buffer's type(), e.g. A, V, or T
  //    for audio, video, or text, respectively,
  // 2) the buffer's track_id(),
  // 3) ":", and
  // 4) the buffer's decode timestamp.
  // If |include_type_and_text_track| is false, then items 1, 2, and 3 are
  // not included in descriptors. This is useful when buffers with different
  // media types but the same decode timestamp are expected, and the exact
  // sequence of media types for the tied timestamps is not subject to
  // verification.
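  // For example, an audio buffer on track 0 at 100us followed by a video
  // buffer on track 1 at 101us is described as "A0:100 V1:101" when
  // |include_type_and_text_track| is true, and as "100 101" otherwise.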
  std::string MergedBufferQueueString(bool include_type_and_text_track) {
    std::stringstream results_stream;
    for (BufferQueue::const_iterator itr = merged_buffers_.begin();
         itr != merged_buffers_.end(); ++itr) {
      if (itr != merged_buffers_.begin())
        results_stream << " ";
      const StreamParserBuffer& buffer = *(itr->get());
      if (include_type_and_text_track) {
        switch (buffer.type()) {
          case DemuxerStream::AUDIO:
            results_stream << "A";
            break;
          case DemuxerStream::VIDEO:
            results_stream << "V";
            break;
          case DemuxerStream::TEXT:
            results_stream << "T";
            break;
          default:
            NOTREACHED();
        }
        results_stream << buffer.track_id() << ":";
      }
      results_stream << buffer.GetDecodeTimestamp().InMicroseconds();
    }
    return results_stream.str();
  }

  // Verifies that MergeBufferQueues() of the current |buffer_queue_map_| and
  // |merged_buffers_| returns true and results in an updated |merged_buffers_|
  // that matches expectation. The expectation, specified in |expected|, is
  // compared to the string resulting from MergedBufferQueueString() (see
  // comments for that method) with |verify_type_and_text_track_sequence|
  // passed. |merged_buffers_| is appended to by the merge, and may be set up
  // by the caller to have some pre-existing buffers; it is both an input and
  // an output of this method.
  // Regardless of |verify_type_and_text_track_sequence|, the marginal number
  // of buffers of each type (audio, video, text) resulting from the merge is
  // also verified to match the number of buffers of that type generated into
  // |buffer_queue_map_|.
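  // For example, the MergeBufferQueues_SingleAudioBuffer test below generates
  // one audio buffer at 100us and then verifies that the merged result is
  // described by "A0:100".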
  void VerifyMergeSuccess(const std::string& expected,
                          bool verify_type_and_text_track_sequence) {
    // |merged_buffers_| may already have some buffers. Count them by type for
    // later inclusion in verification.
    size_t original_audio_in_merged = CountMatchingMergedBuffers(IsAudio);
    size_t original_video_in_merged = CountMatchingMergedBuffers(IsVideo);
    size_t original_text_in_merged = CountMatchingMergedBuffers(IsText);

    EXPECT_TRUE(MergeBufferQueues(buffer_queue_map_, &merged_buffers_));

    // Verify that the resulting contents of |merged_buffers_| match
    // |expected|.
    EXPECT_EQ(expected,
              MergedBufferQueueString(verify_type_and_text_track_sequence));

    // Verify that the correct number of each type of buffer is in the merge
    // result.
    size_t audio_in_merged = CountMatchingMergedBuffers(IsAudio);
    size_t video_in_merged = CountMatchingMergedBuffers(IsVideo);
    size_t text_in_merged = CountMatchingMergedBuffers(IsText);

    EXPECT_GE(audio_in_merged, original_audio_in_merged);
    EXPECT_GE(video_in_merged, original_video_in_merged);
    EXPECT_GE(text_in_merged, original_text_in_merged);

    // Note: operator[] default-constructs an empty BufferQueue for a missing
    // track; erase any such empty entries so they do not linger in
    // |buffer_queue_map_|.
    EXPECT_EQ(buffer_queue_map_[kAudioTrackId].size(),
              audio_in_merged - original_audio_in_merged);
    if (buffer_queue_map_[kAudioTrackId].empty())
      buffer_queue_map_.erase(kAudioTrackId);

    EXPECT_EQ(buffer_queue_map_[kVideoTrackId].size(),
              video_in_merged - original_video_in_merged);
    if (buffer_queue_map_[kVideoTrackId].empty())
      buffer_queue_map_.erase(kVideoTrackId);

    size_t expected_text_buffer_count = 0;
    expected_text_buffer_count += buffer_queue_map_[kTextTrackIdA].size();
    if (buffer_queue_map_[kTextTrackIdA].empty())
      buffer_queue_map_.erase(kTextTrackIdA);
    expected_text_buffer_count += buffer_queue_map_[kTextTrackIdB].size();
    if (buffer_queue_map_[kTextTrackIdB].empty())
      buffer_queue_map_.erase(kTextTrackIdB);
    EXPECT_EQ(expected_text_buffer_count,
              text_in_merged - original_text_in_merged);
  }

  // Verifies that MergeBufferQueues() of the current |buffer_queue_map_| and
  // |merged_buffers_| returns false.
  void VerifyMergeFailure() {
    EXPECT_FALSE(MergeBufferQueues(buffer_queue_map_, &merged_buffers_));
  }

  // Helper that lets tests clear all of the input BufferQueues in
  // |buffer_queue_map_| used by VerifyMerge{Success,Failure}() while leaving
  // |merged_buffers_| intact.
  void ClearBufferQueuesButKeepAnyMergedBuffers() { buffer_queue_map_.clear(); }

 private:
  StreamParser::BufferQueueMap buffer_queue_map_;
  BufferQueue merged_buffers_;
};

TEST_F(StreamParserTest, MergeBufferQueues_AllEmpty) {
  std::string expected = "";
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_SingleAudioBuffer) {
  std::string expected = "A0:100";
  int audio_timestamps[] = { 100, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_SingleVideoBuffer) {
  std::string expected = "V1:100";
  int video_timestamps[] = { 100, kEnd };
  GenerateVideoBuffers(video_timestamps);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_SingleTextBuffer) {
  std::string expected = "T2:100";
  int text_timestamps[] = { 100, kEnd };
  GenerateTextBuffers(text_timestamps, NULL);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_OverlappingAudioVideo) {
  std::string expected = "A0:100 V1:101 V1:102 A0:103 A0:104 V1:105";
  int audio_timestamps[] = { 100, 103, 104, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 101, 102, 105, kEnd };
  GenerateVideoBuffers(video_timestamps);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_OverlappingMultipleText) {
  std::string expected = "T2:100 T2:101 T3:103 T2:104 T3:105 T3:106";
  int text_timestamps_a[] = { 100, 101, 104, kEnd };
  int text_timestamps_b[] = { 103, 105, 106, kEnd };
  GenerateTextBuffers(text_timestamps_a, text_timestamps_b);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_OverlappingAudioVideoText) {
  std::string expected = "A0:100 V1:101 T2:102 V1:103 T3:104 A0:105 V1:106 "
                         "T2:107";
  int audio_timestamps[] = { 100, 105, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 101, 103, 106, kEnd };
  GenerateVideoBuffers(video_timestamps);
  int text_timestamps_a[] = { 102, 107, kEnd };
  int text_timestamps_b[] = { 104, kEnd };
  GenerateTextBuffers(text_timestamps_a, text_timestamps_b);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_NonDecreasingNoCrossMediaDuplicate) {
  std::string expected = "A0:100 A0:100 A0:100 V1:101 V1:101 V1:101 A0:102 "
                         "V1:103 V1:103";
  int audio_timestamps[] = { 100, 100, 100, 102, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 101, 101, 101, 103, 103, kEnd };
  GenerateVideoBuffers(video_timestamps);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_CrossStreamDuplicates) {
  // The merge interface leaves it unspecified which stream's buffer wins the
  // selection when decode timestamps are tied. Verify at least that the right
  // number of each kind of buffer results, and that the merged buffers are in
  // nondecreasing timestamp order.
  std::string expected = "100 100 100 100 100 100 102 102 102 102 102 102 102";
  int audio_timestamps[] = { 100, 100, 100, 102, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 100, 100, 102, 102, 102, kEnd };
  GenerateVideoBuffers(video_timestamps);
  int text_timestamps[] = { 100, 102, 102, 102, kEnd };
  GenerateTextBuffers(text_timestamps, NULL);
  VerifyMergeSuccess(expected, false);
}

TEST_F(StreamParserTest, MergeBufferQueues_InvalidDecreasingSingleStream) {
  int audio_timestamps[] = { 101, 102, 100, 103, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  VerifyMergeFailure();
}

TEST_F(StreamParserTest, MergeBufferQueues_InvalidDecreasingMultipleStreams) {
  int audio_timestamps[] = { 101, 102, 100, 103, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 104, 100, kEnd };
  GenerateVideoBuffers(video_timestamps);
  VerifyMergeFailure();
}

TEST_F(StreamParserTest, MergeBufferQueues_ValidAppendToExistingMerge) {
  std::string expected = "A0:100 V1:101 T2:102 V1:103 T3:104 A0:105 V1:106 "
                         "T2:107";
  int audio_timestamps[] = { 100, 105, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 101, 103, 106, kEnd };
  GenerateVideoBuffers(video_timestamps);
  int text_timestamps_a[] = { 102, 107, kEnd };
  int text_timestamps_b[] = { 104, kEnd };
  GenerateTextBuffers(text_timestamps_a, text_timestamps_b);
  VerifyMergeSuccess(expected, true);

  ClearBufferQueuesButKeepAnyMergedBuffers();
  expected = "A0:100 V1:101 T2:102 V1:103 T3:104 A0:105 V1:106 T2:107 "
             "A0:107 V1:111 T2:112 V1:113 T3:114 A0:115 V1:116 T2:117";
  int more_audio_timestamps[] = { 107, 115, kEnd };
  GenerateAudioBuffers(more_audio_timestamps);
  int more_video_timestamps[] = { 111, 113, 116, kEnd };
  GenerateVideoBuffers(more_video_timestamps);
  int more_text_timestamps_a[] = { 112, 117, kEnd };
  int more_text_timestamps_b[] = { 114, kEnd };
  GenerateTextBuffers(more_text_timestamps_a, more_text_timestamps_b);
  VerifyMergeSuccess(expected, true);
}

TEST_F(StreamParserTest, MergeBufferQueues_InvalidAppendToExistingMerge) {
  std::string expected = "A0:100 V1:101 T2:102 V1:103 T3:104 A0:105 V1:106 "
                         "T2:107";
  int audio_timestamps[] = { 100, 105, kEnd };
  GenerateAudioBuffers(audio_timestamps);
  int video_timestamps[] = { 101, 103, 106, kEnd };
  GenerateVideoBuffers(video_timestamps);
  int text_timestamps_a[] = { 102, 107, kEnd };
  int text_timestamps_b[] = { 104, kEnd };
  GenerateTextBuffers(text_timestamps_a, text_timestamps_b);
  VerifyMergeSuccess(expected, true);

  // Merging empty buffer queues into a pre-existing merge result should
  // succeed and leave the existing result unchanged.
  ClearBufferQueuesButKeepAnyMergedBuffers();
  VerifyMergeSuccess(expected, true);

  // But appending a buffer with a lower timestamp than the last timestamp in
  // the pre-existing merge result should fail.
  int more_audio_timestamps[] = { 106, kEnd };
  GenerateAudioBuffers(more_audio_timestamps);
  VerifyMergeFailure();
}

}  // namespace media