// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "fuchsia_web/webengine/renderer/web_engine_audio_output_device.h"

#include "base/fuchsia/fuchsia_logging.h"
#include "base/logging.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/writable_shared_memory_region.h"
#include "base/no_destructor.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "media/base/audio_timestamp_helper.h"

namespace {

// Total number of buffers used for AudioConsumer.
constexpr size_t kNumBuffers = 4;

// Extra lead time added to the min_lead_time reported by AudioConsumer when
// scheduling the PumpSamples() timer. This is necessary to make it more
// likely that each packet is sent on time, even if the timer is delayed.
// Higher values increase playback latency, but make underflow less likely.
// 20ms keeps latency reasonably low, while making playback reliable under
// normal conditions.
//
// TODO(crbug.com/1153909): It may be possible to reduce this value to reduce
// total latency, but that requires that an elevated scheduling profile is
// applied to this thread.
constexpr base::TimeDelta kLeadTimeExtra = base::Milliseconds(20);
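// Illustrative arithmetic (assumed numbers): if AudioConsumer reports a
// min_lead_time of 30ms, a packet that should start playing at time T has to
// be submitted no later than T - 30ms, so PumpSamples() is scheduled to run
// at T - (30ms + kLeadTimeExtra) = T - 50ms, leaving 20ms of slack for timer
// delays.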

class DefaultAudioThread {
 public:
  DefaultAudioThread() : thread_("WebEngineAudioOutputDevice") {
    base::Thread::Options options(base::MessagePumpType::IO, 0);
    options.thread_type = base::ThreadType::kRealtimeAudio;
    thread_.StartWithOptions(std::move(options));
  }
  ~DefaultAudioThread() = default;

  scoped_refptr<base::SingleThreadTaskRunner> task_runner() {
    return thread_.task_runner();
  }

 private:
  base::Thread thread_;
};

scoped_refptr<base::SingleThreadTaskRunner> GetDefaultAudioTaskRunner() {
  static base::NoDestructor<DefaultAudioThread> default_audio_thread;
  return default_audio_thread->task_runner();
}

}  // namespace

// static
scoped_refptr<WebEngineAudioOutputDevice> WebEngineAudioOutputDevice::Create(
    fidl::InterfaceHandle<fuchsia::media::AudioConsumer> audio_consumer_handle,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
  scoped_refptr<WebEngineAudioOutputDevice> result(
      new WebEngineAudioOutputDevice(task_runner));
  task_runner->PostTask(
      FROM_HERE,
      base::BindOnce(
          &WebEngineAudioOutputDevice::BindAudioConsumerOnAudioThread, result,
          std::move(audio_consumer_handle)));
  return result;
}

// static
scoped_refptr<WebEngineAudioOutputDevice>
WebEngineAudioOutputDevice::CreateOnDefaultThread(
    fidl::InterfaceHandle<fuchsia::media::AudioConsumer>
        audio_consumer_handle) {
  return Create(std::move(audio_consumer_handle), GetDefaultAudioTaskRunner());
}

WebEngineAudioOutputDevice::WebEngineAudioOutputDevice(
    scoped_refptr<base::SingleThreadTaskRunner> task_runner)
    : task_runner_(std::move(task_runner)) {}

WebEngineAudioOutputDevice::~WebEngineAudioOutputDevice() = default;

void WebEngineAudioOutputDevice::Initialize(
    const media::AudioParameters& params,
    RenderCallback* callback) {
  DCHECK(callback);

  // Save |callback| synchronously here to handle the case when Stop() is
  // called before the InitializeOnAudioThread() task is processed.
  {
    base::AutoLock auto_lock(callback_lock_);
    DCHECK(!callback_);
    callback_ = callback;
  }

  task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&WebEngineAudioOutputDevice::InitializeOnAudioThread, this,
                     params));
}

void WebEngineAudioOutputDevice::Start() {
  task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&WebEngineAudioOutputDevice::StartOnAudioThread, this));
}

void WebEngineAudioOutputDevice::Stop() {
  {
    base::AutoLock auto_lock(callback_lock_);
    callback_ = nullptr;
  }

  task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&WebEngineAudioOutputDevice::StopOnAudioThread, this));
}

void WebEngineAudioOutputDevice::Pause() {
  task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&WebEngineAudioOutputDevice::PauseOnAudioThread, this));
}

void WebEngineAudioOutputDevice::Play() {
  task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&WebEngineAudioOutputDevice::PlayOnAudioThread, this));
}

void WebEngineAudioOutputDevice::Flush() {
  task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&WebEngineAudioOutputDevice::FlushOnAudioThread, this));
}

bool WebEngineAudioOutputDevice::SetVolume(double volume) {
  task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&WebEngineAudioOutputDevice::SetVolumeOnAudioThread, this,
                     volume));
  return true;
}

media::OutputDeviceInfo WebEngineAudioOutputDevice::GetOutputDeviceInfo() {
  // AudioConsumer doesn't provide any information about the output device.
  //
  // TODO(crbug.com/852834): Update this method when that functionality is
  // implemented.
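  //
  // Note (informational): the hardcoded parameters below correspond to 48 kHz
  // stereo with 480-frame buffers, i.e. roughly 10ms of audio per buffer.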
  return media::OutputDeviceInfo(
      std::string(), media::OUTPUT_DEVICE_STATUS_OK,
      media::AudioParameters(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                             media::CHANNEL_LAYOUT_STEREO, 48000, 480));
}

void WebEngineAudioOutputDevice::GetOutputDeviceInfoAsync(
    OutputDeviceInfoCB info_cb) {
  std::move(info_cb).Run(GetOutputDeviceInfo());
}

bool WebEngineAudioOutputDevice::IsOptimizedForHardwareParameters() {
  // AudioConsumer doesn't provide device parameters (since the target device
  // may change).
  return false;
}

bool WebEngineAudioOutputDevice::CurrentThreadIsRenderingThread() {
  return task_runner_->BelongsToCurrentThread();
}

void WebEngineAudioOutputDevice::BindAudioConsumerOnAudioThread(
    fidl::InterfaceHandle<fuchsia::media::AudioConsumer>
        audio_consumer_handle) {
  DCHECK(CurrentThreadIsRenderingThread());
  DCHECK(!audio_consumer_);

  audio_consumer_.Bind(std::move(audio_consumer_handle));
  audio_consumer_.set_error_handler([this](zx_status_t status) {
    ZX_LOG(ERROR, status) << "AudioConsumer disconnected.";
    ReportError();
  });
}

void WebEngineAudioOutputDevice::InitializeOnAudioThread(
    const media::AudioParameters& params) {
  DCHECK(CurrentThreadIsRenderingThread());

  params_ = params;
  audio_bus_ = media::AudioBus::Create(params_);

  UpdateVolume();
  WatchAudioConsumerStatus();
}

void WebEngineAudioOutputDevice::StartOnAudioThread() {
  DCHECK(CurrentThreadIsRenderingThread());

  if (!audio_consumer_)
    return;

  CreateStreamSink();

  media_pos_frames_ = 0;
  audio_consumer_->Start(fuchsia::media::AudioConsumerStartFlags::LOW_LATENCY,
                         fuchsia::media::NO_TIMESTAMP, 0);

  // When AudioConsumer handles the Start() message sent above, it will update
  // its state and send a WatchStatus() response. OnAudioConsumerStatusChanged()
  // will then call SchedulePumpSamples() to start sending audio packets.
}

void WebEngineAudioOutputDevice::StopOnAudioThread() {
  DCHECK(CurrentThreadIsRenderingThread());

  if (!audio_consumer_)
    return;

  audio_consumer_->Stop();
  pump_samples_timer_.Stop();

  audio_consumer_.Unbind();
  stream_sink_.Unbind();
  volume_control_.Unbind();
}

void WebEngineAudioOutputDevice::PauseOnAudioThread() {
  DCHECK(CurrentThreadIsRenderingThread());

  if (!audio_consumer_)
    return;

  paused_ = true;
  audio_consumer_->SetRate(0.0);
  pump_samples_timer_.Stop();
}

void WebEngineAudioOutputDevice::PlayOnAudioThread() {
  DCHECK(CurrentThreadIsRenderingThread());

  if (!audio_consumer_)
    return;

  paused_ = false;
  audio_consumer_->SetRate(1.0);
}

void WebEngineAudioOutputDevice::FlushOnAudioThread() {
  DCHECK(CurrentThreadIsRenderingThread());

  if (!stream_sink_)
    return;

  stream_sink_->DiscardAllPacketsNoReply();
}

void WebEngineAudioOutputDevice::SetVolumeOnAudioThread(double volume) {
  DCHECK(CurrentThreadIsRenderingThread());

  volume_ = volume;
  if (audio_consumer_)
    UpdateVolume();
}

void WebEngineAudioOutputDevice::CreateStreamSink() {
  DCHECK(CurrentThreadIsRenderingThread());
  DCHECK(audio_consumer_);

  // Allocate buffers for the StreamSink.
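  // Each buffer is backed by a shared-memory region: a writable mapping is
  // kept locally in |stream_sink_buffers_| for filling samples, while a
  // read-only VMO handle to the same region is handed to the AudioConsumer.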
  size_t buffer_size = params_.GetBytesPerBuffer(media::kSampleFormatF32);
  stream_sink_buffers_.reserve(kNumBuffers);
  available_buffers_indices_.clear();
  std::vector<zx::vmo> vmos_for_stream_sink;
  vmos_for_stream_sink.reserve(kNumBuffers);
  for (size_t i = 0; i < kNumBuffers; ++i) {
    auto region = base::WritableSharedMemoryRegion::Create(buffer_size);
    auto mapping = region.Map();
    if (!mapping.IsValid()) {
      LOG(WARNING) << "Failed to allocate VMO of size " << buffer_size;
      ReportError();
      return;
    }
    stream_sink_buffers_.push_back(std::move(mapping));
    available_buffers_indices_.push_back(i);

    auto read_only_region =
        base::WritableSharedMemoryRegion::ConvertToReadOnly(std::move(region));
    vmos_for_stream_sink.push_back(
        base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
            std::move(read_only_region))
            .PassPlatformHandle());
  }

  // Configure StreamSink.
  fuchsia::media::AudioStreamType stream_type;
  stream_type.channels = params_.channels();
  stream_type.frames_per_second = params_.sample_rate();
  stream_type.sample_format = fuchsia::media::AudioSampleFormat::FLOAT;
  audio_consumer_->CreateStreamSink(std::move(vmos_for_stream_sink),
                                    std::move(stream_type), nullptr,
                                    stream_sink_.NewRequest());
  stream_sink_.set_error_handler([this](zx_status_t status) {
    ZX_LOG(ERROR, status) << "StreamSink disconnected.";
    ReportError();
  });
}

void WebEngineAudioOutputDevice::UpdateVolume() {
  DCHECK(CurrentThreadIsRenderingThread());
  DCHECK(audio_consumer_);

  if (!volume_control_) {
    audio_consumer_->BindVolumeControl(volume_control_.NewRequest());
    volume_control_.set_error_handler([](zx_status_t status) {
      ZX_LOG(ERROR, status) << "VolumeControl disconnected.";
    });
  }
  volume_control_->SetVolume(volume_);
}

void WebEngineAudioOutputDevice::WatchAudioConsumerStatus() {
  DCHECK(CurrentThreadIsRenderingThread());
  audio_consumer_->WatchStatus(fit::bind_member(
      this, &WebEngineAudioOutputDevice::OnAudioConsumerStatusChanged));
}

void WebEngineAudioOutputDevice::OnAudioConsumerStatusChanged(
    fuchsia::media::AudioConsumerStatus status) {
  DCHECK(CurrentThreadIsRenderingThread());

  if (!status.has_min_lead_time()) {
    DLOG(ERROR) << "AudioConsumerStatus.min_lead_time isn't set.";
    ReportError();
    return;
  }
  min_lead_time_ = base::Nanoseconds(status.min_lead_time());

  if (status.has_presentation_timeline()) {
    timeline_reference_time_ = base::TimeTicks::FromZxTime(
        status.presentation_timeline().reference_time);
    timeline_subject_time_ =
        base::Nanoseconds(status.presentation_timeline().subject_time);
    timeline_reference_delta_ = status.presentation_timeline().reference_delta;
    timeline_subject_delta_ = status.presentation_timeline().subject_delta;
  } else {
    // Reset |timeline_reference_time_| to a null value, which is used to
    // indicate that there is no presentation timeline.
    timeline_reference_time_ = base::TimeTicks();
  }

  // Reschedule the timer for the new timeline.
  pump_samples_timer_.Stop();
  SchedulePumpSamples();

  WatchAudioConsumerStatus();
}

void WebEngineAudioOutputDevice::SchedulePumpSamples() {
  DCHECK(CurrentThreadIsRenderingThread());

  if (paused_ || timeline_reference_time_.is_null() ||
      pump_samples_timer_.IsRunning() || available_buffers_indices_.empty()) {
    return;
  }

  // Current position in the stream.
  auto media_pos = media::AudioTimestampHelper::FramesToTime(
      media_pos_frames_, params_.sample_rate());

  // Calculate the expected playback time for the next sample based on the
  // presentation timeline provided by the AudioConsumer.
  // See https://fuchsia.dev/reference/fidl/fuchsia.media#formulas .
  // AudioConsumer uses the monotonic clock (aka base::TimeTicks) as the
  // reference timeline. The subject timeline corresponds to the position
  // within the stream, which is stored as |media_pos_frames_| and then passed
  // in the |pts| field of each packet produced in PumpSamples().
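  //
  // Illustrative example (assumed numbers): with reference_delta ==
  // subject_delta == 1, a stream position 100ms past |timeline_subject_time_|
  // is expected to play exactly 100ms after |timeline_reference_time_| on the
  // monotonic clock.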
  auto playback_time = timeline_reference_time_ +
                       (media_pos - timeline_subject_time_) *
                           timeline_reference_delta_ / timeline_subject_delta_;

  base::TimeTicks now = base::TimeTicks::Now();

  // Target time for when PumpSamples() should run.
  base::TimeTicks target_time = playback_time - min_lead_time_ - kLeadTimeExtra;

  base::TimeDelta delay = target_time - now;
  pump_samples_timer_.Start(
      FROM_HERE, delay,
      base::BindOnce(&WebEngineAudioOutputDevice::PumpSamples, this,
                     playback_time));
}

void WebEngineAudioOutputDevice::PumpSamples(base::TimeTicks playback_time) {
  DCHECK(CurrentThreadIsRenderingThread());

  auto now = base::TimeTicks::Now();

  int skipped_frames = 0;

  // Check if it's too late to send the next packet. If it is, then advance the
  // current stream position.
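  // For example (illustrative numbers): if the remaining lead time is 5ms
  // short of min_lead_time at a 48kHz sample rate, the stream position is
  // advanced by 240 frames and |playback_time| is pushed forward by the same
  // 5ms.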
  auto lead_time = playback_time - now;
  if (lead_time < min_lead_time_) {
    auto new_playback_time = now + min_lead_time_;
    auto skipped_time = new_playback_time - playback_time;
    skipped_frames = media::AudioTimestampHelper::TimeToFrames(
        skipped_time, params_.sample_rate());
    media_pos_frames_ += skipped_frames;
    playback_time += skipped_time;
  }

  int frames_filled;
  {
    base::AutoLock auto_lock(callback_lock_);

    // |callback_| may be reset in Stop(). No need to keep rendering the stream
    // in that case.
    if (!callback_)
      return;

    frames_filled = callback_->Render(playback_time - now, now, skipped_frames,
                                      audio_bus_.get());
  }

  if (frames_filled) {
    DCHECK(!available_buffers_indices_.empty());

    int buffer_index = available_buffers_indices_.back();
    available_buffers_indices_.pop_back();

    audio_bus_->ToInterleaved<media::Float32SampleTypeTraitsNoClip>(
        frames_filled,
        static_cast<float*>(stream_sink_buffers_[buffer_index].memory()));
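
    // |pts| is expressed on the subject timeline (i.e. the stream position)
    // in nanoseconds, matching the timeline formula used in
    // SchedulePumpSamples().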
    fuchsia::media::StreamPacket packet;
    packet.payload_buffer_id = buffer_index;
    packet.pts = media::AudioTimestampHelper::FramesToTime(
                     media_pos_frames_, params_.sample_rate())
                     .InNanoseconds();
    packet.payload_offset = 0;
    packet.payload_size = frames_filled * sizeof(float) * params_.channels();

    stream_sink_->SendPacket(std::move(packet), [this, buffer_index]() {
      OnStreamSendDone(buffer_index);
    });

    media_pos_frames_ += frames_filled;
  }

  SchedulePumpSamples();
}

void WebEngineAudioOutputDevice::OnStreamSendDone(size_t buffer_index) {
  DCHECK(CurrentThreadIsRenderingThread());

  available_buffers_indices_.push_back(buffer_index);
  SchedulePumpSamples();
}

void WebEngineAudioOutputDevice::ReportError() {
  DCHECK(CurrentThreadIsRenderingThread());

  audio_consumer_.Unbind();
  stream_sink_.Unbind();
  volume_control_.Unbind();
  pump_samples_timer_.Stop();

  {
    base::AutoLock auto_lock(callback_lock_);
    if (callback_)
      callback_->OnRenderError();
  }
}