sinc_resampler.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Initial input buffer layout, dividing into regions r0_ to r4_ (note: r0_, r3_
// and r4_ will move after the first load):
//
// |----------------|-----------------------------------------|----------------|
//
//                                   request_frames_
//                   <--------------------------------------------------------->
//                                r0_ (during first load)
//
//  kKernelSize / 2   kKernelSize / 2          kKernelSize / 2   kKernelSize / 2
// <---------------> <--------------->        <---------------> <--------------->
//        r1_               r2_                      r3_               r4_
//
//                          block_size_ == r4_ - r2_
//                   <--------------------------------------->
//
//                                                  request_frames_
//                                    <------------------ ... ----------------->
//                                              r0_ (during second load)
//
// On the second request r0_ slides to the right by kKernelSize / 2 and r3_, r4_
// and block_size_ are reinitialized via step (3) in the algorithm below.
//
// These new regions remain constant until a Flush() occurs. While complicated,
// this allows us to reduce jitter by always requesting the same amount from the
// provided callback.
//
// The algorithm:
//
// 1) Allocate input_buffer of size: request_frames_ + kKernelSize; this
//    ensures there's enough room to read request_frames_ from the callback
//    into region r0_ (which will move between the first and subsequent
//    passes).
//
// 2) Let r1_, r2_ each represent half the kernel centered around r0_:
//
//        r0_ = input_buffer_ + kKernelSize / 2
//        r1_ = input_buffer_
//        r2_ = r0_
//
//    r0_ is always request_frames_ in size. r1_, r2_ are kKernelSize / 2 in
//    size. r1_ must be zero initialized to avoid convolution with garbage (see
//    step (5) for why).
//
// 3) Let r3_, r4_ each represent half the kernel right aligned with the end of
//    r0_ and choose block_size_ as the distance in frames between r4_ and r2_:
//
//        r3_ = r0_ + request_frames_ - kKernelSize
//        r4_ = r0_ + request_frames_ - kKernelSize / 2
//        block_size_ = r4_ - r2_ = request_frames_ - kKernelSize / 2
//
// 4) Consume request_frames_ frames into r0_.
//
// 5) Position the kernel centered at the start of r2_ and generate output
//    frames until the kernel is centered at the start of r4_ or we've finished
//    generating all the output frames.
//
// 6) Wrap leftover data from r3_ to r1_ and from r4_ to r2_.
//
// 7) If we're on the second load, in order to avoid overwriting the frames we
//    just wrapped from r4_ we need to slide r0_ to the right by the size of
//    r4_, which is kKernelSize / 2:
//
//        r0_ = r0_ + kKernelSize / 2 = input_buffer_ + kKernelSize
//
//    r3_, r4_, and block_size_ then need to be reinitialized, so goto (3).
//
// 8) Else, if we're not on the second load, goto (4).
//
// Note: we're glossing over how the sub-sample handling works with
// |virtual_source_idx_|, etc.
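//
// A worked example of the region arithmetic above (editorial sketch; the
// numbers assume kKernelSize == 32 as defined in sinc_resampler.h and an
// illustrative request_frames_ of 512):
//
//   First load:  r1_ = input_buffer_, r2_ = r0_ = input_buffer_ + 16,
//                r3_ = input_buffer_ + 496, r4_ = input_buffer_ + 512,
//                block_size_ = r4_ - r2_ = 496.
//   Second load: r0_ = input_buffer_ + 32, r3_ = input_buffer_ + 512,
//                r4_ = input_buffer_ + 528, block_size_ = 512.
//
// After the second load, r4_ ends exactly at input_buffer_ + request_frames_ +
// kKernelSize, i.e. at the end of the allocation from step (1).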
  75. #include "media/base/sinc_resampler.h"
  76. #include <limits>
  77. #include "base/check_op.h"
  78. #include "base/cpu.h"
  79. #include "base/numerics/math_constants.h"
  80. #include "base/trace_event/trace_event.h"
  81. #include "build/build_config.h"
  82. #include "cc/base/math_util.h"
  83. #if defined(ARCH_CPU_X86_FAMILY)
  84. #include <immintrin.h>
  85. // Including these headers directly should generally be avoided. Since
  86. // Chrome is compiled with -msse3 (the minimal requirement), we include the
  87. // headers directly to make the intrinsics available.
  88. #include <avxintrin.h>
  89. #include <avx2intrin.h>
  90. #include <fmaintrin.h>
  91. #elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
  92. #include <arm_neon.h>
  93. #endif

namespace media {

static double SincScaleFactor(double io_ratio) {
  // |sinc_scale_factor| is basically the normalized cutoff frequency of the
  // low-pass filter.
  double sinc_scale_factor = io_ratio > 1.0 ? 1.0 / io_ratio : 1.0;

  // The sinc function is an idealized brick-wall filter, but since we're
  // windowing it the transition from pass to stop does not happen right away.
  // So we should adjust the low pass filter cutoff slightly downward to avoid
  // some aliasing at the very high end.
  // TODO(crogers): this value is empirical and to be more exact should vary
  // depending on kKernelSize.
  sinc_scale_factor *= 0.9;

  return sinc_scale_factor;
}
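
// Example for SincScaleFactor() (editorial note, not part of the original
// file): when downsampling by 2x (io_ratio == 2.0, e.g. 96 kHz -> 48 kHz) the
// function returns 0.5 * 0.9 == 0.45, i.e. the kernel cuts off at 45% of the
// input Nyquist frequency rather than the ideal 50%, trading a little
// bandwidth for less aliasing.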

// If we know the minimum architecture at compile time, avoid CPU detection.
void SincResampler::InitializeCPUSpecificFeatures() {
#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
  convolve_proc_ = Convolve_NEON;
#elif defined(ARCH_CPU_X86_FAMILY)
  base::CPU cpu;
  // Use AVX2 instead of SSE when both AVX2 and FMA3 are supported.
  if (cpu.has_avx2() && cpu.has_fma3())
    convolve_proc_ = Convolve_AVX2;
  else if (cpu.has_sse2())
    convolve_proc_ = Convolve_SSE;
  else
    convolve_proc_ = Convolve_C;
#else
  // Unknown architecture.
  convolve_proc_ = Convolve_C;
#endif
}

static int CalculateChunkSize(int block_size_, double io_ratio) {
  return block_size_ / io_ratio;
}
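
// For example (editorial sketch; the values are illustrative): with
// block_size_ == 496 input frames available per pass and io_ratio == 2.0
// (2x downsampling), each chunk yields 496 / 2.0 == 248 output frames; any
// fractional result is truncated by the int return type.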

SincResampler::SincResampler(double io_sample_rate_ratio,
                             int request_frames,
                             ReadCB read_cb)
    : io_sample_rate_ratio_(io_sample_rate_ratio),
      read_cb_(std::move(read_cb)),
      request_frames_(request_frames),
      input_buffer_size_(request_frames_ + kKernelSize),
      // Create input buffers with a 32-byte alignment for SIMD optimizations.
      kernel_storage_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 32))),
      kernel_pre_sinc_storage_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 32))),
      kernel_window_storage_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 32))),
      input_buffer_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * input_buffer_size_, 32))),
      r1_(input_buffer_.get()),
      r2_(input_buffer_.get() + kKernelSize / 2) {
  CHECK_GT(request_frames, kKernelSize * 3 / 2)
      << "request_frames must be greater than 1.5 kernels to allow sufficient "
         "data for resampling";
  // This means that after the first call to Flush() we will have
  // block_size_ > kKernelSize and r2_ < r3_.

  InitializeCPUSpecificFeatures();
  DCHECK(convolve_proc_);
  CHECK_GT(request_frames_, 0);
  Flush();

  memset(kernel_storage_.get(), 0,
         sizeof(*kernel_storage_.get()) * kKernelStorageSize);
  memset(kernel_pre_sinc_storage_.get(), 0,
         sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize);
  memset(kernel_window_storage_.get(), 0,
         sizeof(*kernel_window_storage_.get()) * kKernelStorageSize);

  InitializeKernel();
}

SincResampler::~SincResampler() = default;

void SincResampler::UpdateRegions(bool second_load) {
  // Setup various region pointers in the buffer (see diagram above). If we're
  // on the second load we need to slide r0_ to the right by kKernelSize / 2.
  r0_ = input_buffer_.get() + (second_load ? kKernelSize : kKernelSize / 2);
  r3_ = r0_ + request_frames_ - kKernelSize;
  r4_ = r0_ + request_frames_ - kKernelSize / 2;
  block_size_ = r4_ - r2_;
  chunk_size_ = CalculateChunkSize(block_size_, io_sample_rate_ratio_);

  // r1_ at the beginning of the buffer.
  CHECK_EQ(r1_, input_buffer_.get());
  // r1_ is left of r2_ by the same distance that r3_ is left of r4_, i.e. both
  // gaps are kKernelSize / 2 and the region sizes match.
  CHECK_EQ(r2_ - r1_, r4_ - r3_);
  // r2_ left of r3_.
  CHECK_LT(r2_, r3_);
}

void SincResampler::InitializeKernel() {
  // Blackman window parameters.
  static const double kAlpha = 0.16;
  static const double kA0 = 0.5 * (1.0 - kAlpha);
  static const double kA1 = 0.5;
  static const double kA2 = 0.5 * kAlpha;
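  // These coefficients realize the standard Blackman window
  //   w(x) = kA0 - kA1 * cos(2 * pi * x) + kA2 * cos(4 * pi * x), x in [0, 1],
  // with alpha = 0.16. (Editorial note: the formula itself is applied
  // verbatim in the loop below.)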

  // Generates a set of windowed sinc() kernels.
  // We generate a range of sub-sample offsets from 0.0 to 1.0.
  const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
  for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
    const float subsample_offset =
        static_cast<float>(offset_idx) / kKernelOffsetCount;

    for (int i = 0; i < kKernelSize; ++i) {
      const int idx = i + offset_idx * kKernelSize;
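      // Storage layout note (editorial): kernel_storage_ holds
      // kKernelOffsetCount + 1 contiguous kernels of kKernelSize floats each;
      // kernel number offset_idx covers a sub-sample offset of
      // offset_idx / kKernelOffsetCount, which is why the outer loop above is
      // inclusive of kKernelOffsetCount.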
      const float pre_sinc =
          base::kPiFloat * (i - kKernelSize / 2 - subsample_offset);
      kernel_pre_sinc_storage_[idx] = pre_sinc;

      // Compute Blackman window, matching the offset of the sinc().
      const float x = (i - subsample_offset) / kKernelSize;
      const float window =
          static_cast<float>(kA0 - kA1 * cos(2.0 * base::kPiDouble * x) +
                             kA2 * cos(4.0 * base::kPiDouble * x));
      kernel_window_storage_[idx] = window;

      // Compute the sinc with offset, then window the sinc() function and
      // store at the correct offset.
      kernel_storage_[idx] = static_cast<float>(
          window * (pre_sinc ? sin(sinc_scale_factor * pre_sinc) / pre_sinc
                             : sinc_scale_factor));
    }
  }
}

void SincResampler::SetRatio(double io_sample_rate_ratio) {
  if (fabs(io_sample_rate_ratio_ - io_sample_rate_ratio) <
      std::numeric_limits<double>::epsilon()) {
    return;
  }

  io_sample_rate_ratio_ = io_sample_rate_ratio;
  chunk_size_ = CalculateChunkSize(block_size_, io_sample_rate_ratio_);

  // Optimize reinitialization by reusing values which are independent of
  // |sinc_scale_factor|. Provides a 3x speedup.
  const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
  for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
    for (int i = 0; i < kKernelSize; ++i) {
      const int idx = i + offset_idx * kKernelSize;
      const float window = kernel_window_storage_[idx];
      const float pre_sinc = kernel_pre_sinc_storage_[idx];
      kernel_storage_[idx] = static_cast<float>(
          window * (pre_sinc ? sin(sinc_scale_factor * pre_sinc) / pre_sinc
                             : sinc_scale_factor));
    }
  }
}

void SincResampler::Resample(int frames, float* destination) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("audio"), "SincResampler::Resample",
               "io sample rate ratio", io_sample_rate_ratio_);
  int remaining_frames = frames;

  // Step (1) -- Prime the input buffer at the start of the input stream.
  if (!buffer_primed_ && remaining_frames) {
    read_cb_.Run(request_frames_, r0_.get());
    buffer_primed_ = true;
  }

  // Step (2) -- Resample!
  while (remaining_frames) {
    // Silent audio can contain non-zero samples small enough to result in
    // subnormals internally. Disabling subnormals can be significantly faster.
    {
      cc::ScopedSubnormalFloatDisabler disable_subnormals;

      while (virtual_source_idx_ < block_size_) {
        // |virtual_source_idx_| lies in between two kernel offsets so figure
        // out what they are.
        const int source_idx = static_cast<int>(virtual_source_idx_);
        const double virtual_offset_idx =
            (virtual_source_idx_ - source_idx) * kKernelOffsetCount;
        const int offset_idx = static_cast<int>(virtual_offset_idx);
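        // Worked example (editorial; assumes kKernelOffsetCount == 32 as in
        // sinc_resampler.h): virtual_source_idx_ == 2.7 gives source_idx == 2,
        // virtual_offset_idx == 0.7 * 32 ~= 22.4, offset_idx == 22, and a
        // kernel_interpolation_factor of ~0.4 below, i.e. a 60/40 blend of
        // kernels 22 and 23.
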
        // We'll compute "convolutions" for the two kernels which straddle
        // |virtual_source_idx_|.
        const float* k1 = kernel_storage_.get() + offset_idx * kKernelSize;
        const float* k2 = k1 + kKernelSize;

        // Ensure |k1|, |k2| are 32-byte aligned for SIMD usage. Should always
        // be true so long as kKernelSize is a multiple of 32.
        DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k1) & 0x1F);
        DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k2) & 0x1F);

        // Initialize input pointer based on quantized |virtual_source_idx_|.
        const float* input_ptr = r1_ + source_idx;

        // Figure out how much to weight each kernel's "convolution".
        const double kernel_interpolation_factor =
            virtual_offset_idx - offset_idx;
        *destination++ =
            convolve_proc_(input_ptr, k1, k2, kernel_interpolation_factor);

        // Advance the virtual index.
        virtual_source_idx_ += io_sample_rate_ratio_;
        if (!--remaining_frames)
          return;
      }
    }

    // Wrap back around to the start.
    DCHECK_GE(virtual_source_idx_, block_size_);
    virtual_source_idx_ -= block_size_;

    // Step (3) -- Copy r3_, r4_ to r1_, r2_.
    // This wraps the last input frames back to the start of the buffer.
    memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * kKernelSize);

    // Step (4) -- Reinitialize regions if necessary.
    if (r0_ == r2_)
      UpdateRegions(true);

    // Step (5) -- Refresh the buffer with more input.
    read_cb_.Run(request_frames_, r0_.get());
  }
}

void SincResampler::PrimeWithSilence() {
  // By enforcing the buffer hasn't been primed, we ensure the input buffer has
  // already been zeroed during construction or by a previous Flush() call.
  DCHECK(!buffer_primed_);
  DCHECK_EQ(input_buffer_[0], 0.0f);
  UpdateRegions(true);
}

void SincResampler::Flush() {
  virtual_source_idx_ = 0;
  buffer_primed_ = false;
  memset(input_buffer_.get(), 0,
         sizeof(*input_buffer_.get()) * input_buffer_size_);
  UpdateRegions(false);
}

int SincResampler::GetMaxInputFramesRequested(
    int output_frames_requested) const {
  const int num_chunks = static_cast<int>(
      std::ceil(static_cast<float>(output_frames_requested) / chunk_size_));

  return num_chunks * request_frames_;
}
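
// For example (editorial sketch; values are illustrative): if chunk_size_ ==
// 240 and the client asks for 1000 output frames, num_chunks ==
// ceil(1000 / 240.0) == 5, so at most 5 * request_frames_ input frames may be
// requested through |read_cb_| while producing that output.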

double SincResampler::BufferedFrames() const {
  return buffer_primed_ ? request_frames_ - virtual_source_idx_ : 0;
}

float SincResampler::Convolve_C(const float* input_ptr,
                                const float* k1,
                                const float* k2,
                                double kernel_interpolation_factor) {
  float sum1 = 0;
  float sum2 = 0;

  // Generate a single output sample. Unrolling this loop hurt performance in
  // local testing.
  int n = kKernelSize;
  while (n--) {
    sum1 += *input_ptr * *k1++;
    sum2 += *input_ptr++ * *k2++;
  }

  // Linearly interpolate the two "convolutions".
  return static_cast<float>((1.0 - kernel_interpolation_factor) * sum1 +
                            kernel_interpolation_factor * sum2);
}

#if defined(ARCH_CPU_X86_FAMILY)
float SincResampler::Convolve_SSE(const float* input_ptr,
                                  const float* k1,
                                  const float* k2,
                                  double kernel_interpolation_factor) {
  __m128 m_input;
  __m128 m_sums1 = _mm_setzero_ps();
  __m128 m_sums2 = _mm_setzero_ps();

  // Based on |input_ptr| alignment, we need to use loadu or load. Unrolling
  // these loops hurt performance in local testing.
  if (reinterpret_cast<uintptr_t>(input_ptr) & 0x0F) {
    for (int i = 0; i < kKernelSize; i += 4) {
      m_input = _mm_loadu_ps(input_ptr + i);
      m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
      m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
    }
  } else {
    for (int i = 0; i < kKernelSize; i += 4) {
      m_input = _mm_load_ps(input_ptr + i);
      m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
      m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
    }
  }

  // Linearly interpolate the two "convolutions".
  m_sums1 = _mm_mul_ps(
      m_sums1,
      _mm_set_ps1(static_cast<float>(1.0 - kernel_interpolation_factor)));
  m_sums2 = _mm_mul_ps(
      m_sums2, _mm_set_ps1(static_cast<float>(kernel_interpolation_factor)));
  m_sums1 = _mm_add_ps(m_sums1, m_sums2);

  // Sum components together.
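  // (Editorial note on the horizontal sum below: _mm_movehl_ps copies the two
  // upper lanes over the two lower lanes, so the following add leaves
  // lane0 = s0 + s2 and lane1 = s1 + s3; the shuffle-and-add_ss then folds
  // lane1 into lane0, leaving the full four-lane total in lane 0.)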
  float result;
  m_sums2 = _mm_add_ps(_mm_movehl_ps(m_sums1, m_sums1), m_sums1);
  _mm_store_ss(&result,
               _mm_add_ss(m_sums2, _mm_shuffle_ps(m_sums2, m_sums2, 1)));

  return result;
}

__attribute__((target("avx2,fma"))) float SincResampler::Convolve_AVX2(
    const float* input_ptr,
    const float* k1,
    const float* k2,
    double kernel_interpolation_factor) {
  __m256 m_input;
  __m256 m_sums1 = _mm256_setzero_ps();
  __m256 m_sums2 = _mm256_setzero_ps();

  // Based on |input_ptr| alignment, we need to use loadu or load. Unrolling
  // these loops has not been tested or benchmarked.
  bool aligned_input = (reinterpret_cast<uintptr_t>(input_ptr) & 0x1F) == 0;
  if (!aligned_input) {
    for (size_t i = 0; i < kKernelSize; i += 8) {
      m_input = _mm256_loadu_ps(input_ptr + i);
      m_sums1 = _mm256_fmadd_ps(m_input, _mm256_load_ps(k1 + i), m_sums1);
      m_sums2 = _mm256_fmadd_ps(m_input, _mm256_load_ps(k2 + i), m_sums2);
    }
  } else {
    for (size_t i = 0; i < kKernelSize; i += 8) {
      m_input = _mm256_load_ps(input_ptr + i);
      m_sums1 = _mm256_fmadd_ps(m_input, _mm256_load_ps(k1 + i), m_sums1);
      m_sums2 = _mm256_fmadd_ps(m_input, _mm256_load_ps(k2 + i), m_sums2);
    }
  }

  // Linearly interpolate the two "convolutions".
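  // (Editorial note: each 256-bit accumulator is first folded to 128 bits by
  // adding its low half, extractf128(x, 0), to its high half,
  // extractf128(x, 1); from there the interpolation and horizontal sum match
  // the SSE path above.)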
  __m128 m128_sums1 = _mm_add_ps(_mm256_extractf128_ps(m_sums1, 0),
                                 _mm256_extractf128_ps(m_sums1, 1));
  __m128 m128_sums2 = _mm_add_ps(_mm256_extractf128_ps(m_sums2, 0),
                                 _mm256_extractf128_ps(m_sums2, 1));
  m128_sums1 = _mm_mul_ps(
      m128_sums1,
      _mm_set_ps1(static_cast<float>(1.0 - kernel_interpolation_factor)));
  m128_sums2 = _mm_mul_ps(
      m128_sums2,
      _mm_set_ps1(static_cast<float>(kernel_interpolation_factor)));
  m128_sums1 = _mm_add_ps(m128_sums1, m128_sums2);

  // Sum components together.
  float result;
  m128_sums2 = _mm_add_ps(_mm_movehl_ps(m128_sums1, m128_sums1), m128_sums1);
  _mm_store_ss(&result, _mm_add_ss(m128_sums2, _mm_shuffle_ps(m128_sums2,
                                                              m128_sums2, 1)));

  return result;
}
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
float SincResampler::Convolve_NEON(const float* input_ptr,
                                   const float* k1,
                                   const float* k2,
                                   double kernel_interpolation_factor) {
  float32x4_t m_input;
  float32x4_t m_sums1 = vmovq_n_f32(0);
  float32x4_t m_sums2 = vmovq_n_f32(0);

  const float* upper = input_ptr + kKernelSize;
  for (; input_ptr < upper;) {
    m_input = vld1q_f32(input_ptr);
    input_ptr += 4;
    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
    k1 += 4;
    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
    k2 += 4;
  }

  // Linearly interpolate the two "convolutions".
  m_sums1 = vmlaq_f32(
      vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
      m_sums2, vmovq_n_f32(kernel_interpolation_factor));

  // Sum components together.
  float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1));
  return vget_lane_f32(vpadd_f32(m_half, m_half), 0);
}
#endif

}  // namespace media