compress_offload.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * compress_core.c - compress offload core
  4. *
  5. * Copyright (C) 2011 Intel Corporation
  6. * Authors: Vinod Koul <vinod.koul@linux.intel.com>
  7. * Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
  8. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  9. *
  10. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  11. */
  12. #define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
  14. #include <linux/file.h>
  15. #include <linux/fs.h>
  16. #include <linux/list.h>
  17. #include <linux/math64.h>
  18. #include <linux/mm.h>
  19. #include <linux/mutex.h>
  20. #include <linux/poll.h>
  21. #include <linux/slab.h>
  22. #include <linux/sched.h>
  23. #include <linux/types.h>
  24. #include <linux/uio.h>
  25. #include <linux/uaccess.h>
  26. #include <linux/module.h>
  27. #include <linux/compat.h>
  28. #include <sound/core.h>
  29. #include <sound/initval.h>
  30. #include <sound/info.h>
  31. #include <sound/compress_params.h>
  32. #include <sound/compress_offload.h>
  33. #include <sound/compress_driver.h>
  34. #ifndef __GENKSYMS__
  35. #include <trace/hooks/snd_compr.h>
  36. #endif
  37. /* struct snd_compr_codec_caps overflows the ioctl bit size for some
  38. * architectures, so we need to disable the relevant ioctls.
  39. */
  40. #if _IOC_SIZEBITS < 14
  41. #define COMPR_CODEC_CAPS_OVERFLOW
  42. #endif
  43. /* TODO:
44. * - add substream support for multiple devices in case
45. *   SND_DYNAMIC_MINORS is not used
  46. * - Multiple node representation
  47. * driver should be able to register multiple nodes
  48. */
  49. static DEFINE_MUTEX(device_mutex);
  50. struct snd_compr_file {
  51. unsigned long caps;
  52. struct snd_compr_stream stream;
  53. };
  54. static void error_delayed_work(struct work_struct *work);
  55. /*
  56. * a note on stream states used:
  57. * we use following states in the compressed core
  58. * SNDRV_PCM_STATE_OPEN: When stream has been opened.
  59. * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
  60. * calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this
  61. * state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
62. * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
63. *   playback only). After setting up the stream, the user writes data to
64. *   the buffer before starting the stream.
  65. * SNDRV_PCM_STATE_RUNNING: When stream has been started and is
  66. * decoding/encoding and rendering/capturing data.
  67. * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
  68. * by calling SNDRV_COMPRESS_DRAIN.
  69. * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
  70. * SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
  71. * SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
  72. */
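/*
 * Illustrative sketch only (not part of this file): a minimal playback
 * sequence as seen from userspace, assuming the device node created below
 * as /dev/snd/comprC<card>D<device> and the ioctls dispatched in
 * snd_compr_ioctl():
 *
 *   fd = open("/dev/snd/comprC0D0", O_WRONLY);        state: OPEN
 *   ioctl(fd, SNDRV_COMPRESS_SET_PARAMS, &params);     -> SETUP
 *   write(fd, data, bytes);                            -> PREPARED
 *   ioctl(fd, SNDRV_COMPRESS_START);                   -> RUNNING
 *   ...write() more data, poll() for free space...
 *   ioctl(fd, SNDRV_COMPRESS_DRAIN);                   -> DRAINING -> SETUP
 *   close(fd);
 */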
  73. static int snd_compr_open(struct inode *inode, struct file *f)
  74. {
  75. struct snd_compr *compr;
  76. struct snd_compr_file *data;
  77. struct snd_compr_runtime *runtime;
  78. enum snd_compr_direction dirn;
  79. int maj = imajor(inode);
  80. int ret;
  81. if ((f->f_flags & O_ACCMODE) == O_WRONLY)
  82. dirn = SND_COMPRESS_PLAYBACK;
  83. else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
  84. dirn = SND_COMPRESS_CAPTURE;
  85. else
  86. return -EINVAL;
  87. if (maj == snd_major)
  88. compr = snd_lookup_minor_data(iminor(inode),
  89. SNDRV_DEVICE_TYPE_COMPRESS);
  90. else
  91. return -EBADFD;
  92. if (compr == NULL) {
  93. pr_err("no device data!!!\n");
  94. return -ENODEV;
  95. }
  96. if (dirn != compr->direction) {
  97. pr_err("this device doesn't support this direction\n");
  98. snd_card_unref(compr->card);
  99. return -EINVAL;
  100. }
  101. data = kzalloc(sizeof(*data), GFP_KERNEL);
  102. if (!data) {
  103. snd_card_unref(compr->card);
  104. return -ENOMEM;
  105. }
  106. INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);
  107. data->stream.ops = compr->ops;
  108. data->stream.direction = dirn;
  109. data->stream.private_data = compr->private_data;
  110. data->stream.device = compr;
  111. runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
  112. if (!runtime) {
  113. kfree(data);
  114. snd_card_unref(compr->card);
  115. return -ENOMEM;
  116. }
  117. runtime->state = SNDRV_PCM_STATE_OPEN;
  118. init_waitqueue_head(&runtime->sleep);
  119. data->stream.runtime = runtime;
  120. f->private_data = (void *)data;
  121. mutex_lock(&compr->lock);
  122. ret = compr->ops->open(&data->stream);
  123. mutex_unlock(&compr->lock);
  124. if (ret) {
  125. kfree(runtime);
  126. kfree(data);
  127. }
  128. snd_card_unref(compr->card);
  129. return ret;
  130. }
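/*
 * release() handler: stop a stream that is still running, draining or
 * paused, cancel pending error work, call the driver's free op and then
 * release the runtime; the data buffer is freed here only when it was
 * allocated by the core rather than set up as a DMA buffer.
 */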
  131. static int snd_compr_free(struct inode *inode, struct file *f)
  132. {
  133. struct snd_compr_file *data = f->private_data;
  134. struct snd_compr_runtime *runtime = data->stream.runtime;
  135. cancel_delayed_work_sync(&data->stream.error_work);
  136. switch (runtime->state) {
  137. case SNDRV_PCM_STATE_RUNNING:
  138. case SNDRV_PCM_STATE_DRAINING:
  139. case SNDRV_PCM_STATE_PAUSED:
  140. data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
  141. break;
  142. default:
  143. break;
  144. }
  145. data->stream.ops->free(&data->stream);
  146. if (!data->stream.runtime->dma_buffer_p)
  147. kfree(data->stream.runtime->buffer);
  148. kfree(data->stream.runtime);
  149. kfree(data);
  150. return 0;
  151. }
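/*
 * Refresh the timestamp from the driver's pointer op and fold copied_total
 * into the runtime counters: bytes transferred to the DSP for playback,
 * bytes made available by the DSP for capture.
 */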
  152. static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
  153. struct snd_compr_tstamp *tstamp)
  154. {
  155. if (!stream->ops->pointer)
  156. return -ENOTSUPP;
  157. stream->ops->pointer(stream, tstamp);
  158. pr_debug("dsp consumed till %d total %d bytes\n",
  159. tstamp->byte_offset, tstamp->copied_total);
  160. if (stream->direction == SND_COMPRESS_PLAYBACK)
  161. stream->runtime->total_bytes_transferred = tstamp->copied_total;
  162. else
  163. stream->runtime->total_bytes_available = tstamp->copied_total;
  164. return 0;
  165. }
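/*
 * Work out how many bytes userspace can move next: free ring-buffer space
 * for playback, unread data for capture. The refreshed timestamp is
 * returned in @avail as a side effect.
 */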
  166. static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
  167. struct snd_compr_avail *avail)
  168. {
  169. memset(avail, 0, sizeof(*avail));
  170. snd_compr_update_tstamp(stream, &avail->tstamp);
  171. /* Still need to return avail even if tstamp can't be filled in */
  172. if (stream->runtime->total_bytes_available == 0 &&
  173. stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
  174. stream->direction == SND_COMPRESS_PLAYBACK) {
  175. pr_debug("detected init and someone forgot to do a write\n");
  176. return stream->runtime->buffer_size;
  177. }
  178. pr_debug("app wrote %lld, DSP consumed %lld\n",
  179. stream->runtime->total_bytes_available,
  180. stream->runtime->total_bytes_transferred);
  181. if (stream->runtime->total_bytes_available ==
  182. stream->runtime->total_bytes_transferred) {
  183. if (stream->direction == SND_COMPRESS_PLAYBACK) {
  184. pr_debug("both pointers are same, returning full avail\n");
  185. return stream->runtime->buffer_size;
  186. } else {
  187. pr_debug("both pointers are same, returning no avail\n");
  188. return 0;
  189. }
  190. }
  191. avail->avail = stream->runtime->total_bytes_available -
  192. stream->runtime->total_bytes_transferred;
  193. if (stream->direction == SND_COMPRESS_PLAYBACK)
  194. avail->avail = stream->runtime->buffer_size - avail->avail;
  195. pr_debug("ret avail as %lld\n", avail->avail);
  196. return avail->avail;
  197. }
  198. static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
  199. {
  200. struct snd_compr_avail avail;
  201. return snd_compr_calc_avail(stream, &avail);
  202. }
  203. static int
  204. snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
  205. {
  206. struct snd_compr_avail ioctl_avail;
  207. size_t avail;
  208. avail = snd_compr_calc_avail(stream, &ioctl_avail);
  209. ioctl_avail.avail = avail;
  210. switch (stream->runtime->state) {
  211. case SNDRV_PCM_STATE_OPEN:
  212. return -EBADFD;
  213. case SNDRV_PCM_STATE_XRUN:
  214. return -EPIPE;
  215. default:
  216. break;
  217. }
  218. if (copy_to_user((__u64 __user *)arg,
  219. &ioctl_avail, sizeof(ioctl_avail)))
  220. return -EFAULT;
  221. return 0;
  222. }
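/*
 * Copy @count bytes from userspace into the core ring buffer, wrapping at
 * buffer_size, and notify the driver via the optional ack op. Used only
 * when the driver does not provide a copy op.
 */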
  223. static int snd_compr_write_data(struct snd_compr_stream *stream,
  224. const char __user *buf, size_t count)
  225. {
  226. void *dstn;
  227. size_t copy;
  228. struct snd_compr_runtime *runtime = stream->runtime;
  229. /* 64-bit Modulus */
  230. u64 app_pointer = div64_u64(runtime->total_bytes_available,
  231. runtime->buffer_size);
  232. app_pointer = runtime->total_bytes_available -
  233. (app_pointer * runtime->buffer_size);
  234. dstn = runtime->buffer + app_pointer;
  235. pr_debug("copying %ld at %lld\n",
  236. (unsigned long)count, app_pointer);
  237. if (count < runtime->buffer_size - app_pointer) {
  238. if (copy_from_user(dstn, buf, count))
  239. return -EFAULT;
  240. } else {
  241. copy = runtime->buffer_size - app_pointer;
  242. if (copy_from_user(dstn, buf, copy))
  243. return -EFAULT;
  244. if (copy_from_user(runtime->buffer, buf + copy, count - copy))
  245. return -EFAULT;
  246. }
  247. /* if DSP cares, let it know data has been written */
  248. if (stream->ops->ack)
  249. stream->ops->ack(stream, count);
  250. return count;
  251. }
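/*
 * write(2) handler: only valid in SETUP, PREPARED or RUNNING; the request
 * is clipped to the available space and handed either to the driver's
 * copy op or to snd_compr_write_data(). The first write after SET_PARAMS
 * moves the stream from SETUP to PREPARED.
 */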
  252. static ssize_t snd_compr_write(struct file *f, const char __user *buf,
  253. size_t count, loff_t *offset)
  254. {
  255. struct snd_compr_file *data = f->private_data;
  256. struct snd_compr_stream *stream;
  257. size_t avail;
  258. int retval;
  259. if (snd_BUG_ON(!data))
  260. return -EFAULT;
  261. stream = &data->stream;
  262. mutex_lock(&stream->device->lock);
263. /* write is allowed when the stream is running or has been set up */
  264. switch (stream->runtime->state) {
  265. case SNDRV_PCM_STATE_SETUP:
  266. case SNDRV_PCM_STATE_PREPARED:
  267. case SNDRV_PCM_STATE_RUNNING:
  268. break;
  269. default:
  270. mutex_unlock(&stream->device->lock);
  271. return -EBADFD;
  272. }
  273. avail = snd_compr_get_avail(stream);
  274. pr_debug("avail returned %ld\n", (unsigned long)avail);
  275. /* calculate how much we can write to buffer */
  276. if (avail > count)
  277. avail = count;
  278. if (stream->ops->copy) {
279. char __user *cbuf = (char __user *)buf;
  280. retval = stream->ops->copy(stream, cbuf, avail);
  281. } else {
  282. retval = snd_compr_write_data(stream, buf, avail);
  283. }
  284. if (retval > 0)
  285. stream->runtime->total_bytes_available += retval;
286. /* while initiating the stream, write should be called before the START
287. * trigger, so move the state from SETUP to PREPARED here */
  288. if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
  289. stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
  290. pr_debug("stream prepared, Houston we are good to go\n");
  291. }
  292. mutex_unlock(&stream->device->lock);
  293. return retval;
  294. }
  295. static ssize_t snd_compr_read(struct file *f, char __user *buf,
  296. size_t count, loff_t *offset)
  297. {
  298. struct snd_compr_file *data = f->private_data;
  299. struct snd_compr_stream *stream;
  300. size_t avail;
  301. int retval;
  302. if (snd_BUG_ON(!data))
  303. return -EFAULT;
  304. stream = &data->stream;
  305. mutex_lock(&stream->device->lock);
306. /* read is allowed when the stream is running, paused, draining or in
307. * setup (SETUP is the state we transition to after stop, so if the user
308. * wants to read data after stop we allow that)
309. */
  310. switch (stream->runtime->state) {
  311. case SNDRV_PCM_STATE_OPEN:
  312. case SNDRV_PCM_STATE_PREPARED:
  313. case SNDRV_PCM_STATE_SUSPENDED:
  314. case SNDRV_PCM_STATE_DISCONNECTED:
  315. retval = -EBADFD;
  316. goto out;
  317. case SNDRV_PCM_STATE_XRUN:
  318. retval = -EPIPE;
  319. goto out;
  320. }
  321. avail = snd_compr_get_avail(stream);
  322. pr_debug("avail returned %ld\n", (unsigned long)avail);
  323. /* calculate how much we can read from buffer */
  324. if (avail > count)
  325. avail = count;
  326. if (stream->ops->copy) {
  327. retval = stream->ops->copy(stream, buf, avail);
  328. } else {
  329. retval = -ENXIO;
  330. goto out;
  331. }
  332. if (retval > 0)
  333. stream->runtime->total_bytes_transferred += retval;
  334. out:
  335. mutex_unlock(&stream->device->lock);
  336. return retval;
  337. }
  338. static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
  339. {
  340. return -ENXIO;
  341. }
  342. static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
  343. {
  344. if (stream->direction == SND_COMPRESS_PLAYBACK)
  345. return EPOLLOUT | EPOLLWRNORM;
  346. else
  347. return EPOLLIN | EPOLLRDNORM;
  348. }
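/*
 * poll(2) handler: readiness is reported once at least one fragment can be
 * written (playback) or read (capture); a stream woken at the end of a
 * drain is moved back to SETUP here, while OPEN and XRUN yield EPOLLERR.
 */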
  349. static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
  350. {
  351. struct snd_compr_file *data = f->private_data;
  352. struct snd_compr_stream *stream;
  353. size_t avail;
  354. __poll_t retval = 0;
  355. if (snd_BUG_ON(!data))
  356. return EPOLLERR;
  357. stream = &data->stream;
  358. mutex_lock(&stream->device->lock);
  359. switch (stream->runtime->state) {
  360. case SNDRV_PCM_STATE_OPEN:
  361. case SNDRV_PCM_STATE_XRUN:
  362. retval = snd_compr_get_poll(stream) | EPOLLERR;
  363. goto out;
  364. default:
  365. break;
  366. }
  367. poll_wait(f, &stream->runtime->sleep, wait);
  368. avail = snd_compr_get_avail(stream);
  369. pr_debug("avail is %ld\n", (unsigned long)avail);
  370. /* check if we have at least one fragment to fill */
  371. switch (stream->runtime->state) {
  372. case SNDRV_PCM_STATE_DRAINING:
373. /* stream has been woken up after the drain completed,
374. * so move the stream back to the SETUP state
375. */
  376. retval = snd_compr_get_poll(stream);
  377. stream->runtime->state = SNDRV_PCM_STATE_SETUP;
  378. break;
  379. case SNDRV_PCM_STATE_RUNNING:
  380. case SNDRV_PCM_STATE_PREPARED:
  381. case SNDRV_PCM_STATE_PAUSED:
  382. if (avail >= stream->runtime->fragment_size)
  383. retval = snd_compr_get_poll(stream);
  384. break;
  385. default:
  386. retval = snd_compr_get_poll(stream) | EPOLLERR;
  387. break;
  388. }
  389. out:
  390. mutex_unlock(&stream->device->lock);
  391. return retval;
  392. }
  393. static int
  394. snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
  395. {
  396. int retval;
  397. struct snd_compr_caps caps;
  398. if (!stream->ops->get_caps)
  399. return -ENXIO;
  400. memset(&caps, 0, sizeof(caps));
  401. retval = stream->ops->get_caps(stream, &caps);
  402. if (retval)
  403. goto out;
  404. if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
  405. retval = -EFAULT;
  406. out:
  407. return retval;
  408. }
  409. #ifndef COMPR_CODEC_CAPS_OVERFLOW
  410. static int
  411. snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
  412. {
  413. int retval;
  414. struct snd_compr_codec_caps *caps;
  415. if (!stream->ops->get_codec_caps)
  416. return -ENXIO;
  417. caps = kzalloc(sizeof(*caps), GFP_KERNEL);
  418. if (!caps)
  419. return -ENOMEM;
  420. retval = stream->ops->get_codec_caps(stream, caps);
  421. if (retval)
  422. goto out;
  423. if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
  424. retval = -EFAULT;
  425. out:
  426. kfree(caps);
  427. return retval;
  428. }
  429. #endif /* !COMPR_CODEC_CAPS_OVERFLOW */
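/*
 * Allocate a DMA buffer of @size bytes using the device/type preset in
 * stream->dma_buffer and install it as the runtime buffer. Note that it
 * returns 1, not 0, on success.
 */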
  430. int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
  431. {
  432. struct snd_dma_buffer *dmab;
  433. int ret;
  434. if (snd_BUG_ON(!(stream) || !(stream)->runtime))
  435. return -EINVAL;
  436. dmab = kzalloc(sizeof(*dmab), GFP_KERNEL);
  437. if (!dmab)
  438. return -ENOMEM;
  439. dmab->dev = stream->dma_buffer.dev;
  440. ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
  441. if (ret < 0) {
  442. kfree(dmab);
  443. return ret;
  444. }
  445. snd_compr_set_runtime_buffer(stream, dmab);
  446. stream->runtime->dma_bytes = size;
  447. return 1;
  448. }
  449. EXPORT_SYMBOL(snd_compr_malloc_pages);
  450. int snd_compr_free_pages(struct snd_compr_stream *stream)
  451. {
  452. struct snd_compr_runtime *runtime;
  453. if (snd_BUG_ON(!(stream) || !(stream)->runtime))
  454. return -EINVAL;
  455. runtime = stream->runtime;
  456. if (runtime->dma_area == NULL)
  457. return 0;
  458. if (runtime->dma_buffer_p != &stream->dma_buffer) {
  459. /* It's a newly allocated buffer. Release it now. */
  460. snd_dma_free_pages(runtime->dma_buffer_p);
  461. kfree(runtime->dma_buffer_p);
  462. }
  463. snd_compr_set_runtime_buffer(stream, NULL);
  464. return 0;
  465. }
  466. EXPORT_SYMBOL(snd_compr_free_pages);
  467. /* revisit this with snd_pcm_preallocate_xxx */
  468. static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
  469. struct snd_compr_params *params)
  470. {
  471. unsigned int buffer_size;
  472. void *buffer = NULL;
  473. buffer_size = params->buffer.fragment_size * params->buffer.fragments;
  474. if (stream->ops->copy) {
  475. buffer = NULL;
  476. /* if copy is defined the driver will be required to copy
  477. * the data from core
  478. */
  479. } else {
  480. if (stream->runtime->dma_buffer_p) {
  481. if (buffer_size > stream->runtime->dma_buffer_p->bytes)
  482. dev_err(&stream->device->dev,
  483. "Not enough DMA buffer");
  484. else
  485. buffer = stream->runtime->dma_buffer_p->area;
  486. } else {
  487. buffer = kmalloc(buffer_size, GFP_KERNEL);
  488. }
  489. if (!buffer)
  490. return -ENOMEM;
  491. }
  492. stream->runtime->fragment_size = params->buffer.fragment_size;
  493. stream->runtime->fragments = params->buffer.fragments;
  494. stream->runtime->buffer = buffer;
  495. stream->runtime->buffer_size = buffer_size;
  496. return 0;
  497. }
  498. static int snd_compress_check_input(struct snd_compr_params *params)
  499. {
500. /* first let's check the buffer parameters */
  501. if (params->buffer.fragment_size == 0 ||
  502. params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
  503. params->buffer.fragments == 0)
  504. return -EINVAL;
  505. /* now codec parameters */
  506. if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
  507. return -EINVAL;
  508. if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
  509. return -EINVAL;
  510. return 0;
  511. }
  512. static int
  513. snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
  514. {
  515. struct snd_compr_params *params;
  516. int retval;
  517. if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
518. /*
519. * we should allow parameter changes only when the stream has been
520. * opened, not in any other state
521. */
  522. params = memdup_user((void __user *)arg, sizeof(*params));
  523. if (IS_ERR(params))
  524. return PTR_ERR(params);
  525. retval = snd_compress_check_input(params);
  526. if (retval)
  527. goto out;
  528. retval = snd_compr_allocate_buffer(stream, params);
  529. if (retval) {
  530. retval = -ENOMEM;
  531. goto out;
  532. }
  533. retval = stream->ops->set_params(stream, params);
  534. if (retval)
  535. goto out;
  536. stream->metadata_set = false;
  537. stream->next_track = false;
  538. stream->runtime->state = SNDRV_PCM_STATE_SETUP;
  539. } else {
  540. return -EPERM;
  541. }
  542. out:
  543. kfree(params);
  544. return retval;
  545. }
  546. static int
  547. snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
  548. {
  549. struct snd_codec *params;
  550. int retval;
  551. if (!stream->ops->get_params)
  552. return -EBADFD;
  553. params = kzalloc(sizeof(*params), GFP_KERNEL);
  554. if (!params)
  555. return -ENOMEM;
  556. retval = stream->ops->get_params(stream, params);
  557. if (retval)
  558. goto out;
  559. if (copy_to_user((char __user *)arg, params, sizeof(*params)))
  560. retval = -EFAULT;
  561. out:
  562. kfree(params);
  563. return retval;
  564. }
  565. static int
  566. snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
  567. {
  568. struct snd_compr_metadata metadata;
  569. int retval;
  570. if (!stream->ops->get_metadata)
  571. return -ENXIO;
  572. if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
  573. return -EFAULT;
  574. retval = stream->ops->get_metadata(stream, &metadata);
  575. if (retval != 0)
  576. return retval;
  577. if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
  578. return -EFAULT;
  579. return 0;
  580. }
  581. static int
  582. snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
  583. {
  584. struct snd_compr_metadata metadata;
  585. int retval;
  586. if (!stream->ops->set_metadata)
  587. return -ENXIO;
588. /*
589. * we should allow parameter changes only when the stream has been
590. * opened, not in other cases
591. */
  592. if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
  593. return -EFAULT;
  594. retval = stream->ops->set_metadata(stream, &metadata);
  595. stream->metadata_set = true;
  596. return retval;
  597. }
  598. static inline int
  599. snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
  600. {
  601. struct snd_compr_tstamp tstamp = {0};
  602. int ret;
  603. ret = snd_compr_update_tstamp(stream, &tstamp);
  604. if (ret == 0)
  605. ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
  606. &tstamp, sizeof(tstamp)) ? -EFAULT : 0;
  607. return ret;
  608. }
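/*
 * Pause/resume: the trace_android_vh_snd_compr_use_pause_in_drain() vendor
 * hook below lets a vendor module opt in to pausing and resuming a stream
 * that is already DRAINING, which the core otherwise rejects with -EPERM.
 */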
  609. static int snd_compr_pause(struct snd_compr_stream *stream)
  610. {
  611. int retval;
  612. bool use_pause_in_drain = false;
  613. bool leave_draining_state = false;
  614. trace_android_vh_snd_compr_use_pause_in_drain(&use_pause_in_drain,
  615. &leave_draining_state);
  616. if (use_pause_in_drain && stream->runtime->state == SNDRV_PCM_STATE_DRAINING) {
  617. retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
  618. if (!retval && leave_draining_state) {
  619. stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
  620. wake_up(&stream->runtime->sleep);
  621. }
  622. return retval;
  623. }
  624. if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
  625. return -EPERM;
  626. retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
  627. if (!retval)
  628. stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
  629. return retval;
  630. }
  631. static int snd_compr_resume(struct snd_compr_stream *stream)
  632. {
  633. int retval;
  634. bool use_pause_in_drain = false;
  635. bool leave_draining_state = false;
  636. trace_android_vh_snd_compr_use_pause_in_drain(&use_pause_in_drain,
  637. &leave_draining_state);
  638. if (use_pause_in_drain && stream->runtime->state == SNDRV_PCM_STATE_DRAINING)
  639. return stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
  640. if (stream->runtime->state != SNDRV_PCM_STATE_PAUSED)
  641. return -EPERM;
  642. retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
  643. if (!retval)
  644. stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
  645. return retval;
  646. }
  647. static int snd_compr_start(struct snd_compr_stream *stream)
  648. {
  649. int retval;
  650. switch (stream->runtime->state) {
  651. case SNDRV_PCM_STATE_SETUP:
  652. if (stream->direction != SND_COMPRESS_CAPTURE)
  653. return -EPERM;
  654. break;
  655. case SNDRV_PCM_STATE_PREPARED:
  656. break;
  657. default:
  658. return -EPERM;
  659. }
  660. retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
  661. if (!retval)
  662. stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
  663. return retval;
  664. }
  665. static int snd_compr_stop(struct snd_compr_stream *stream)
  666. {
  667. int retval;
  668. switch (stream->runtime->state) {
  669. case SNDRV_PCM_STATE_OPEN:
  670. case SNDRV_PCM_STATE_SETUP:
  671. case SNDRV_PCM_STATE_PREPARED:
  672. return -EPERM;
  673. default:
  674. break;
  675. }
  676. retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
  677. if (!retval) {
  678. /* clear flags and stop any drain wait */
  679. stream->partial_drain = false;
  680. stream->metadata_set = false;
  681. snd_compr_drain_notify(stream);
  682. stream->runtime->total_bytes_available = 0;
  683. stream->runtime->total_bytes_transferred = 0;
  684. }
  685. return retval;
  686. }
  687. static void error_delayed_work(struct work_struct *work)
  688. {
  689. struct snd_compr_stream *stream;
  690. stream = container_of(work, struct snd_compr_stream, error_work.work);
  691. mutex_lock(&stream->device->lock);
  692. stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
  693. wake_up(&stream->runtime->sleep);
  694. mutex_unlock(&stream->device->lock);
  695. }
  696. /*
  697. * snd_compr_stop_error: Report a fatal error on a stream
  698. * @stream: pointer to stream
  699. * @state: state to transition the stream to
  700. *
  701. * Stop the stream and set its state.
  702. *
  703. * Should be called with compressed device lock held.
  704. */
  705. int snd_compr_stop_error(struct snd_compr_stream *stream,
  706. snd_pcm_state_t state)
  707. {
  708. if (stream->runtime->state == state)
  709. return 0;
  710. stream->runtime->state = state;
  711. pr_debug("Changing state to: %d\n", state);
  712. queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);
  713. return 0;
  714. }
  715. EXPORT_SYMBOL_GPL(snd_compr_stop_error);
  716. static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
  717. {
  718. int ret;
  719. /*
  720. * We are called with lock held. So drop the lock while we wait for
  721. * drain complete notification from the driver
  722. *
  723. * It is expected that driver will notify the drain completion and then
  724. * stream will be moved to SETUP state, even if draining resulted in an
  725. * error. We can trigger next track after this.
  726. */
  727. stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
  728. mutex_unlock(&stream->device->lock);
729. /* we wait here for the drain to complete; the wait can return because
730. * of a signal interruption, a wait error, or success.
731. * For the first two cases we don't do anything different here and
732. * simply return after waking up
733. */
  734. ret = wait_event_interruptible(stream->runtime->sleep,
  735. (stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
  736. if (ret == -ERESTARTSYS)
  737. pr_debug("wait aborted by a signal\n");
  738. else if (ret)
  739. pr_debug("wait for drain failed with %d\n", ret);
  740. wake_up(&stream->runtime->sleep);
  741. mutex_lock(&stream->device->lock);
  742. return ret;
  743. }
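/*
 * SNDRV_COMPRESS_DRAIN: ask the driver to render everything that has been
 * queued, then block (with the device lock dropped) until the driver
 * reports completion through snd_compr_drain_notify() or the wait is
 * interrupted.
 */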
  744. static int snd_compr_drain(struct snd_compr_stream *stream)
  745. {
  746. int retval;
  747. switch (stream->runtime->state) {
  748. case SNDRV_PCM_STATE_OPEN:
  749. case SNDRV_PCM_STATE_SETUP:
  750. case SNDRV_PCM_STATE_PREPARED:
  751. case SNDRV_PCM_STATE_PAUSED:
  752. return -EPERM;
  753. case SNDRV_PCM_STATE_XRUN:
  754. return -EPIPE;
  755. default:
  756. break;
  757. }
  758. retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
  759. if (retval) {
  760. pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
  761. wake_up(&stream->runtime->sleep);
  762. return retval;
  763. }
  764. return snd_compress_wait_for_drain(stream);
  765. }
  766. static int snd_compr_next_track(struct snd_compr_stream *stream)
  767. {
  768. int retval;
  769. /* only a running stream can transition to next track */
  770. if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
  771. return -EPERM;
  772. /* next track doesn't have any meaning for capture streams */
  773. if (stream->direction == SND_COMPRESS_CAPTURE)
  774. return -EPERM;
  775. /* you can signal next track if this is intended to be a gapless stream
  776. * and current track metadata is set
  777. */
  778. if (stream->metadata_set == false)
  779. return -EPERM;
  780. retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
  781. if (retval != 0)
  782. return retval;
  783. stream->metadata_set = false;
  784. stream->next_track = true;
  785. return 0;
  786. }
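/*
 * Partial drain is the gapless variant of drain: it is only allowed after
 * SNDRV_COMPRESS_NEXT_TRACK and drains just the current track so that the
 * queued next track can start without an audible gap.
 */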
  787. static int snd_compr_partial_drain(struct snd_compr_stream *stream)
  788. {
  789. int retval;
  790. switch (stream->runtime->state) {
  791. case SNDRV_PCM_STATE_OPEN:
  792. case SNDRV_PCM_STATE_SETUP:
  793. case SNDRV_PCM_STATE_PREPARED:
  794. case SNDRV_PCM_STATE_PAUSED:
  795. return -EPERM;
  796. case SNDRV_PCM_STATE_XRUN:
  797. return -EPIPE;
  798. default:
  799. break;
  800. }
  801. /* partial drain doesn't have any meaning for capture streams */
  802. if (stream->direction == SND_COMPRESS_CAPTURE)
  803. return -EPERM;
  804. /* stream can be drained only when next track has been signalled */
  805. if (stream->next_track == false)
  806. return -EPERM;
  807. stream->partial_drain = true;
  808. retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
  809. if (retval) {
  810. pr_debug("Partial drain returned failure\n");
  811. wake_up(&stream->runtime->sleep);
  812. return retval;
  813. }
  814. stream->next_track = false;
  815. return snd_compress_wait_for_drain(stream);
  816. }
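/*
 * Single unlocked_ioctl entry point: commands are dispatched on _IOC_NR()
 * with the device lock held, which serializes them against the read, write
 * and poll paths above.
 */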
  817. static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
  818. {
  819. struct snd_compr_file *data = f->private_data;
  820. struct snd_compr_stream *stream;
  821. int retval = -ENOTTY;
  822. if (snd_BUG_ON(!data))
  823. return -EFAULT;
  824. stream = &data->stream;
  825. mutex_lock(&stream->device->lock);
  826. switch (_IOC_NR(cmd)) {
  827. case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
  828. retval = put_user(SNDRV_COMPRESS_VERSION,
  829. (int __user *)arg) ? -EFAULT : 0;
  830. break;
  831. case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
  832. retval = snd_compr_get_caps(stream, arg);
  833. break;
  834. #ifndef COMPR_CODEC_CAPS_OVERFLOW
  835. case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
  836. retval = snd_compr_get_codec_caps(stream, arg);
  837. break;
  838. #endif
  839. case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
  840. retval = snd_compr_set_params(stream, arg);
  841. break;
  842. case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
  843. retval = snd_compr_get_params(stream, arg);
  844. break;
  845. case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
  846. retval = snd_compr_set_metadata(stream, arg);
  847. break;
  848. case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
  849. retval = snd_compr_get_metadata(stream, arg);
  850. break;
  851. case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
  852. retval = snd_compr_tstamp(stream, arg);
  853. break;
  854. case _IOC_NR(SNDRV_COMPRESS_AVAIL):
  855. retval = snd_compr_ioctl_avail(stream, arg);
  856. break;
  857. case _IOC_NR(SNDRV_COMPRESS_PAUSE):
  858. retval = snd_compr_pause(stream);
  859. break;
  860. case _IOC_NR(SNDRV_COMPRESS_RESUME):
  861. retval = snd_compr_resume(stream);
  862. break;
  863. case _IOC_NR(SNDRV_COMPRESS_START):
  864. retval = snd_compr_start(stream);
  865. break;
  866. case _IOC_NR(SNDRV_COMPRESS_STOP):
  867. retval = snd_compr_stop(stream);
  868. break;
  869. case _IOC_NR(SNDRV_COMPRESS_DRAIN):
  870. retval = snd_compr_drain(stream);
  871. break;
  872. case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
  873. retval = snd_compr_partial_drain(stream);
  874. break;
  875. case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
  876. retval = snd_compr_next_track(stream);
  877. break;
  878. }
  879. mutex_unlock(&stream->device->lock);
  880. return retval;
  881. }
  882. /* support of 32bit userspace on 64bit platforms */
  883. #ifdef CONFIG_COMPAT
  884. static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
  885. unsigned long arg)
  886. {
  887. return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
  888. }
  889. #endif
  890. static const struct file_operations snd_compr_file_ops = {
  891. .owner = THIS_MODULE,
  892. .open = snd_compr_open,
  893. .release = snd_compr_free,
  894. .write = snd_compr_write,
  895. .read = snd_compr_read,
  896. .unlocked_ioctl = snd_compr_ioctl,
  897. #ifdef CONFIG_COMPAT
  898. .compat_ioctl = snd_compr_ioctl_compat,
  899. #endif
  900. .mmap = snd_compr_mmap,
  901. .poll = snd_compr_poll,
  902. };
  903. static int snd_compress_dev_register(struct snd_device *device)
  904. {
  905. int ret;
  906. struct snd_compr *compr;
  907. if (snd_BUG_ON(!device || !device->device_data))
  908. return -EBADFD;
  909. compr = device->device_data;
  910. pr_debug("reg device %s, direction %d\n", compr->name,
  911. compr->direction);
  912. /* register compressed device */
  913. ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
  914. compr->card, compr->device,
  915. &snd_compr_file_ops, compr, &compr->dev);
  916. if (ret < 0) {
  917. pr_err("snd_register_device failed %d\n", ret);
  918. return ret;
  919. }
  920. return ret;
  921. }
  922. static int snd_compress_dev_disconnect(struct snd_device *device)
  923. {
  924. struct snd_compr *compr;
  925. compr = device->device_data;
  926. snd_unregister_device(&compr->dev);
  927. return 0;
  928. }
  929. #ifdef CONFIG_SND_VERBOSE_PROCFS
  930. static void snd_compress_proc_info_read(struct snd_info_entry *entry,
  931. struct snd_info_buffer *buffer)
  932. {
  933. struct snd_compr *compr = (struct snd_compr *)entry->private_data;
  934. snd_iprintf(buffer, "card: %d\n", compr->card->number);
  935. snd_iprintf(buffer, "device: %d\n", compr->device);
  936. snd_iprintf(buffer, "stream: %s\n",
  937. compr->direction == SND_COMPRESS_PLAYBACK
  938. ? "PLAYBACK" : "CAPTURE");
  939. snd_iprintf(buffer, "id: %s\n", compr->id);
  940. }
  941. static int snd_compress_proc_init(struct snd_compr *compr)
  942. {
  943. struct snd_info_entry *entry;
  944. char name[16];
  945. sprintf(name, "compr%i", compr->device);
  946. entry = snd_info_create_card_entry(compr->card, name,
  947. compr->card->proc_root);
  948. if (!entry)
  949. return -ENOMEM;
  950. entry->mode = S_IFDIR | 0555;
  951. compr->proc_root = entry;
  952. entry = snd_info_create_card_entry(compr->card, "info",
  953. compr->proc_root);
  954. if (entry)
  955. snd_info_set_text_ops(entry, compr,
  956. snd_compress_proc_info_read);
  957. compr->proc_info_entry = entry;
  958. return 0;
  959. }
  960. static void snd_compress_proc_done(struct snd_compr *compr)
  961. {
  962. snd_info_free_entry(compr->proc_info_entry);
  963. compr->proc_info_entry = NULL;
  964. snd_info_free_entry(compr->proc_root);
  965. compr->proc_root = NULL;
  966. }
  967. static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
  968. {
  969. strlcpy(compr->id, id, sizeof(compr->id));
  970. }
  971. #else
  972. static inline int snd_compress_proc_init(struct snd_compr *compr)
  973. {
  974. return 0;
  975. }
  976. static inline void snd_compress_proc_done(struct snd_compr *compr)
  977. {
  978. }
  979. static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
  980. {
  981. }
  982. #endif
  983. static int snd_compress_dev_free(struct snd_device *device)
  984. {
  985. struct snd_compr *compr;
  986. compr = device->device_data;
  987. snd_compress_proc_done(compr);
  988. put_device(&compr->dev);
  989. return 0;
  990. }
991. /*
992. * snd_compress_new: create new compress device
993. * @card: sound card pointer
994. * @device: device number
995. * @dirn: device direction, should be of type enum snd_compr_direction
* @id: ID string for the device
996. * @compr: compress device pointer
997. */
  998. int snd_compress_new(struct snd_card *card, int device,
  999. int dirn, const char *id, struct snd_compr *compr)
  1000. {
  1001. static const struct snd_device_ops ops = {
  1002. .dev_free = snd_compress_dev_free,
  1003. .dev_register = snd_compress_dev_register,
  1004. .dev_disconnect = snd_compress_dev_disconnect,
  1005. };
  1006. int ret;
  1007. compr->card = card;
  1008. compr->device = device;
  1009. compr->direction = dirn;
  1010. snd_compress_set_id(compr, id);
  1011. snd_device_initialize(&compr->dev, card);
  1012. dev_set_name(&compr->dev, "comprC%iD%i", card->number, device);
  1013. ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
  1014. if (ret == 0)
  1015. snd_compress_proc_init(compr);
  1016. return ret;
  1017. }
  1018. EXPORT_SYMBOL_GPL(snd_compress_new);
  1019. static int snd_compress_add_device(struct snd_compr *device)
  1020. {
  1021. int ret;
  1022. if (!device->card)
  1023. return -EINVAL;
  1024. /* register the card */
  1025. ret = snd_card_register(device->card);
  1026. if (ret)
  1027. goto out;
  1028. return 0;
  1029. out:
  1030. pr_err("failed with %d\n", ret);
  1031. return ret;
  1032. }
  1033. static int snd_compress_remove_device(struct snd_compr *device)
  1034. {
  1035. return snd_card_free(device->card);
  1036. }
  1037. /**
  1038. * snd_compress_register - register compressed device
  1039. *
  1040. * @device: compressed device to register
  1041. */
  1042. int snd_compress_register(struct snd_compr *device)
  1043. {
  1044. int retval;
  1045. if (device->name == NULL || device->ops == NULL)
  1046. return -EINVAL;
  1047. pr_debug("Registering compressed device %s\n", device->name);
  1048. if (snd_BUG_ON(!device->ops->open))
  1049. return -EINVAL;
  1050. if (snd_BUG_ON(!device->ops->free))
  1051. return -EINVAL;
  1052. if (snd_BUG_ON(!device->ops->set_params))
  1053. return -EINVAL;
  1054. if (snd_BUG_ON(!device->ops->trigger))
  1055. return -EINVAL;
  1056. mutex_init(&device->lock);
  1057. /* register a compressed card */
  1058. mutex_lock(&device_mutex);
  1059. retval = snd_compress_add_device(device);
  1060. mutex_unlock(&device_mutex);
  1061. return retval;
  1062. }
  1063. EXPORT_SYMBOL_GPL(snd_compress_register);
  1064. int snd_compress_deregister(struct snd_compr *device)
  1065. {
  1066. pr_debug("Removing compressed device %s\n", device->name);
  1067. mutex_lock(&device_mutex);
  1068. snd_compress_remove_device(device);
  1069. mutex_unlock(&device_mutex);
  1070. return 0;
  1071. }
  1072. EXPORT_SYMBOL_GPL(snd_compress_deregister);
  1073. MODULE_DESCRIPTION("ALSA Compressed offload framework");
  1074. MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
  1075. MODULE_LICENSE("GPL v2");