trace.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//

#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include "sof-priv.h"
#include "ops.h"

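/*
 * Return the number of trace bytes that can be read at @pos without
 * blocking: the data up to the host write pointer, or the tail of the
 * buffer when the write pointer has wrapped around.
 */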
static size_t sof_trace_avail(struct snd_sof_dev *sdev,
			      loff_t pos, size_t buffer_size)
{
	loff_t host_offset = READ_ONCE(sdev->host_offset);

	/*
	 * If the host offset is less than the local pos, the write pointer
	 * of the host DMA buffer has wrapped, so the trace data from pos to
	 * the end of the host DMA buffer must be read out first.
	 */
	if (host_offset < pos)
		return buffer_size - pos;

	/* If trace data is available now, there is no need to wait. */
	if (host_offset > pos)
		return host_offset - pos;

	return 0;
}

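/*
 * Return the number of trace bytes readable at @pos, sleeping until the
 * firmware reports new data if none is available; returns 0 once a
 * stopped trace has been fully drained.
 */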
static size_t sof_wait_trace_avail(struct snd_sof_dev *sdev,
				   loff_t pos, size_t buffer_size)
{
	wait_queue_entry_t wait;
	size_t ret = sof_trace_avail(sdev, pos, buffer_size);

	/* data immediately available */
	if (ret)
		return ret;

	if (!sdev->dtrace_is_enabled && sdev->dtrace_draining) {
		/*
		 * tracing has ended and all traces have been
		 * read by client, return EOF
		 */
		sdev->dtrace_draining = false;
		return 0;
	}

	/* wait for available trace data from FW */
	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&sdev->trace_sleep, &wait);

	if (!signal_pending(current)) {
		/* set timeout to max value, no error code */
		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&sdev->trace_sleep, &wait);

	return sof_trace_avail(sdev, pos, buffer_size);
}

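/*
 * debugfs read handler: copy up to @count bytes of trace data from the
 * local DMA buffer to user space, waiting for new data when the read
 * position has caught up with the host write pointer.
 */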
static ssize_t sof_dfsentry_trace_read(struct file *file, char __user *buffer,
				       size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	unsigned long rem;
	loff_t lpos = *ppos;
	size_t avail, buffer_size = dfse->size;
	u64 lpos_64;

	/* make sure we know about any failures on the DSP side */
	sdev->dtrace_error = false;

	/* check pos and count */
	if (lpos < 0)
		return -EINVAL;

	if (!count)
		return 0;

	/* check for buffer wrap and count overflow */
	lpos_64 = lpos;
	lpos = do_div(lpos_64, buffer_size);

	if (count > buffer_size - lpos) /* min() not used to avoid sparse warnings */
		count = buffer_size - lpos;

	/* get available count based on current host offset */
	avail = sof_wait_trace_avail(sdev, lpos, buffer_size);
	if (sdev->dtrace_error) {
		dev_err(sdev->dev, "error: trace IO error\n");
		return -EIO;
	}

	/* make sure count is <= avail */
	count = avail > count ? count : avail;

	/* copy available trace data to debugfs */
	rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
	if (rem)
		return -EFAULT;

	/* move debugfs reading position */
	*ppos += count;

	return count;
}

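/*
 * debugfs release handler: reset the host offset once tracing has
 * stopped so a re-opened trace file does not replay stale data.
 */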
static int sof_dfsentry_trace_release(struct inode *inode, struct file *file)
{
	struct snd_sof_dfsentry *dfse = inode->i_private;
	struct snd_sof_dev *sdev = dfse->sdev;

	/* avoid duplicate traces at next open */
	if (!sdev->dtrace_is_enabled)
		sdev->host_offset = 0;

	return 0;
}

static const struct file_operations sof_dfs_trace_fops = {
	.open = simple_open,
	.read = sof_dfsentry_trace_read,
	.llseek = default_llseek,
	.release = sof_dfsentry_trace_release,
};

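/* Expose the trace DMA buffer to user space as a debugfs "trace" file. */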
static int trace_debugfs_create(struct snd_sof_dev *sdev)
{
	struct snd_sof_dfsentry *dfse;

	if (!sdev)
		return -EINVAL;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->buf = sdev->dmatb.area;
	dfse->size = sdev->dmatb.bytes;
	dfse->sdev = sdev;

	debugfs_create_file("trace", 0444, sdev->debugfs_root, dfse,
			    &sof_dfs_trace_fops);

	return 0;
}

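/*
 * Send the DMA trace parameters to the DSP and start the trace DMA
 * stream. The extended (timestamped) parameter format is used when the
 * firmware ABI is 3.7.0 or newer.
 */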
int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev)
{
	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
	struct sof_ipc_fw_version *v = &ready->version;
	struct sof_ipc_dma_trace_params_ext params;
	struct sof_ipc_reply ipc_reply;
	int ret;

	if (!sdev->dtrace_is_supported)
		return 0;

	if (sdev->dtrace_is_enabled || !sdev->dma_trace_pages)
		return -EINVAL;

	/* set IPC parameters */
	params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
	/* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
	if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
		params.timestamp_ns = ktime_get(); /* in nanoseconds */
	} else {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
	}
	params.buffer.phy_addr = sdev->dmatp.addr;
	params.buffer.size = sdev->dmatb.bytes;
	params.buffer.pages = sdev->dma_trace_pages;
	params.stream_tag = 0;

	sdev->host_offset = 0;
	sdev->dtrace_draining = false;

	ret = snd_sof_dma_trace_init(sdev, &params.stream_tag);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: fail in snd_sof_dma_trace_init %d\n", ret);
		return ret;
	}
	dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);

	/* send IPC to the DSP */
	ret = sof_ipc_tx_message(sdev->ipc,
				 params.hdr.cmd, &params, sizeof(params),
				 &ipc_reply, sizeof(ipc_reply));
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't set params for DMA for trace %d\n", ret);
		goto trace_release;
	}

	ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: snd_sof_dma_trace_trigger: start: %d\n", ret);
		goto trace_release;
	}

	sdev->dtrace_is_enabled = true;

	return 0;

trace_release:
	snd_sof_dma_trace_release(sdev);
	return ret;
}

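/*
 * Allocate the trace page table and data buffers, create the debugfs
 * entry on first boot and start tracing via snd_sof_init_trace_ipc().
 */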
int snd_sof_init_trace(struct snd_sof_dev *sdev)
{
	int ret;

	if (!sdev->dtrace_is_supported)
		return 0;

	/* mark the trace as disabled before starting initialization */
	sdev->dtrace_is_enabled = false;

	/* allocate trace page table buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
				  PAGE_SIZE, &sdev->dmatp);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't alloc page table for trace %d\n", ret);
		return ret;
	}

	/* allocate trace data buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
				  DMA_BUF_SIZE_FOR_TRACE, &sdev->dmatb);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't alloc buffer for trace %d\n", ret);
		goto page_err;
	}

	/* create compressed page table for audio firmware */
	ret = snd_sof_create_page_table(sdev->dev, &sdev->dmatb,
					sdev->dmatp.area, sdev->dmatb.bytes);
	if (ret < 0)
		goto table_err;

	sdev->dma_trace_pages = ret;
	dev_dbg(sdev->dev, "dma_trace_pages: %d\n", sdev->dma_trace_pages);

	if (sdev->first_boot) {
		ret = trace_debugfs_create(sdev);
		if (ret < 0)
			goto table_err;
	}

	init_waitqueue_head(&sdev->trace_sleep);

	ret = snd_sof_init_trace_ipc(sdev);
	if (ret < 0)
		goto table_err;

	return 0;
table_err:
	sdev->dma_trace_pages = 0;
	snd_dma_free_pages(&sdev->dmatb);
page_err:
	snd_dma_free_pages(&sdev->dmatp);
	return ret;
}
EXPORT_SYMBOL(snd_sof_init_trace);

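/*
 * Position update from the DSP: record the new host write offset, wake
 * up any waiting readers and report trace buffer overflows.
 */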
int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
			     struct sof_ipc_dma_trace_posn *posn)
{
	if (!sdev->dtrace_is_supported)
		return 0;

	if (sdev->dtrace_is_enabled && sdev->host_offset != posn->host_offset) {
		sdev->host_offset = posn->host_offset;
		wake_up(&sdev->trace_sleep);
	}

	if (posn->overflow != 0)
		dev_err(sdev->dev,
			"error: DSP trace buffer overflow %u bytes. Total messages %d\n",
			posn->overflow, posn->messages);

	return 0;
}

/* an error has occurred within the DSP that prevents further trace */
void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
{
	if (!sdev->dtrace_is_supported)
		return;

	if (sdev->dtrace_is_enabled) {
		dev_err(sdev->dev, "error: waking up any trace sleepers\n");
		sdev->dtrace_error = true;
		wake_up(&sdev->trace_sleep);
	}
}
EXPORT_SYMBOL(snd_sof_trace_notify_for_error);

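/*
 * Stop the trace DMA stream and release its resources; leave the trace
 * in the draining state so readers can consume the remaining data.
 */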
void snd_sof_release_trace(struct snd_sof_dev *sdev)
{
	int ret;

	if (!sdev->dtrace_is_supported || !sdev->dtrace_is_enabled)
		return;

	ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: snd_sof_dma_trace_trigger: stop: %d\n", ret);

	ret = snd_sof_dma_trace_release(sdev);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: fail in snd_sof_dma_trace_release %d\n", ret);

	sdev->dtrace_is_enabled = false;
	sdev->dtrace_draining = true;
	wake_up(&sdev->trace_sleep);
}
EXPORT_SYMBOL(snd_sof_release_trace);

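/* Stop tracing and free the DMA trace page table and data buffers. */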
void snd_sof_free_trace(struct snd_sof_dev *sdev)
{
	if (!sdev->dtrace_is_supported)
		return;

	snd_sof_release_trace(sdev);

	if (sdev->dma_trace_pages) {
		snd_dma_free_pages(&sdev->dmatb);
		snd_dma_free_pages(&sdev->dmatp);
		sdev->dma_trace_pages = 0;
	}
}
EXPORT_SYMBOL(snd_sof_free_trace);