async_tx.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *      Dan Williams <dan.j.williams@intel.com>
 *
 *      with architecture considerations by:
 *      Neil Brown <neilb@suse.de>
 *      Jeff Garzik <jeff@garzik.org>
 */
#include <linux/rculist.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>

#ifdef CONFIG_DMA_ENGINE
static int __init async_tx_init(void)
{
        async_dmaengine_get();

        printk(KERN_INFO "async_tx: api initialized (async)\n");

        return 0;
}

static void __exit async_tx_exit(void)
{
        async_dmaengine_put();
}

module_init(async_tx_init);
module_exit(async_tx_exit);

/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *      the transaction execute synchronously
 * @submit: transaction dependency and submission modifiers
 * @tx_type: transaction type
 */
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
                        enum dma_transaction_type tx_type)
{
        struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

        /* see if we can keep the chain on one channel */
        if (depend_tx &&
            dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
                return depend_tx->chan;
        return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
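
/*
 * Illustrative sketch (not part of the original file): the async_* offload
 * helpers reach this routine through the async_tx_find_channel() wrapper in
 * <linux/async_tx.h> to try to keep a dependency chain on a single channel.
 * The example_ function name below is hypothetical.
 */
static struct dma_chan * __maybe_unused
example_pick_memcpy_channel(struct async_submit_ctl *submit)
{
        /* prefer the dependency's channel; otherwise any DMA_MEMCPY-capable one */
        return __async_tx_find_channel(submit, DMA_MEMCPY);
}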
#endif

/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *      pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
                        struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *chan = depend_tx->chan;
        struct dma_device *device = chan->device;
        struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

        /* first check to see if we can still append to depend_tx */
        txd_lock(depend_tx);
        if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
                txd_chain(depend_tx, tx);
                intr_tx = NULL;
        }
        txd_unlock(depend_tx);

        /* attached dependency, flush the parent channel */
        if (!intr_tx) {
                device->device_issue_pending(chan);
                return;
        }

        /* see if we can schedule an interrupt
         * otherwise poll for completion
         */
        if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                intr_tx = device->device_prep_dma_interrupt(chan, 0);
        else
                intr_tx = NULL;

        if (intr_tx) {
                intr_tx->callback = NULL;
                intr_tx->callback_param = NULL;
                /* safe to chain outside the lock since we know we are
                 * not submitted yet
                 */
                txd_chain(intr_tx, tx);

                /* check if we need to append */
                txd_lock(depend_tx);
                if (txd_parent(depend_tx)) {
                        txd_chain(depend_tx, intr_tx);
                        async_tx_ack(intr_tx);
                        intr_tx = NULL;
                }
                txd_unlock(depend_tx);

                if (intr_tx) {
                        txd_clear_parent(intr_tx);
                        intr_tx->tx_submit(intr_tx);
                        async_tx_ack(intr_tx);
                }
                device->device_issue_pending(chan);
        } else {
                if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
                        panic("%s: DMA error waiting for depend_tx\n",
                              __func__);
                tx->tx_submit(tx);
        }
}

/**
 * submit_disposition - flags for routing an incoming operation
 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
 *
 * while holding depend_tx->lock we must avoid submitting new operations
 * to prevent a circular locking dependency with drivers that already
 * hold a channel lock when calling async_tx_run_dependencies.
 */
enum submit_disposition {
        ASYNC_TX_SUBMITTED,
        ASYNC_TX_CHANNEL_SWITCH,
        ASYNC_TX_DIRECT_SUBMIT,
};

void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
                struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

        tx->callback = submit->cb_fn;
        tx->callback_param = submit->cb_param;

        if (depend_tx) {
                enum submit_disposition s;

                /* sanity check the dependency chain:
                 * 1/ if ack is already set then we cannot be sure
                 * we are referring to the correct operation
                 * 2/ dependencies are 1:1 i.e. two transactions can
                 * not depend on the same parent
                 */
                BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
                       txd_parent(tx));

                /* the lock prevents async_tx_run_dependencies from missing
                 * the setting of ->next when ->parent != NULL
                 */
                txd_lock(depend_tx);
                if (txd_parent(depend_tx)) {
                        /* we have a parent so we can not submit directly
                         * if we are staying on the same channel: append
                         * else: channel switch
                         */
                        if (depend_tx->chan == chan) {
                                txd_chain(depend_tx, tx);
                                s = ASYNC_TX_SUBMITTED;
                        } else
                                s = ASYNC_TX_CHANNEL_SWITCH;
                } else {
                        /* we do not have a parent so we may be able to submit
                         * directly if we are staying on the same channel
                         */
                        if (depend_tx->chan == chan)
                                s = ASYNC_TX_DIRECT_SUBMIT;
                        else
                                s = ASYNC_TX_CHANNEL_SWITCH;
                }
                txd_unlock(depend_tx);

                switch (s) {
                case ASYNC_TX_SUBMITTED:
                        break;
                case ASYNC_TX_CHANNEL_SWITCH:
                        async_tx_channel_switch(depend_tx, tx);
                        break;
                case ASYNC_TX_DIRECT_SUBMIT:
                        txd_clear_parent(tx);
                        tx->tx_submit(tx);
                        break;
                }
        } else {
                txd_clear_parent(tx);
                tx->tx_submit(tx);
        }

        if (submit->flags & ASYNC_TX_ACK)
                async_tx_ack(tx);

        if (depend_tx)
                async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
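
/*
 * Illustrative sketch (not part of the original file): the usual pattern in
 * the async_* offload helpers is to prepare a descriptor on the selected
 * channel and then hand it to async_tx_submit(), which resolves any
 * dependency chain before the descriptor is issued.  The example_ name is
 * hypothetical and assumes a DMA_MEMCPY-capable channel.
 */
static struct dma_async_tx_descriptor * __maybe_unused
example_submit_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                      size_t len, struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;

        if (chan)
                tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
                                                           len, 0);
        if (tx)
                async_tx_submit(chan, tx, submit);

        return tx;
}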

/**
 * async_trigger_callback - schedules the callback function to be run
 * @submit: submission and completion parameters
 *
 * honored flags: ASYNC_TX_ACK
 *
 * The callback is run after any dependent operations have completed.
 */
struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit)
{
        struct dma_chan *chan;
        struct dma_device *device;
        struct dma_async_tx_descriptor *tx;
        struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

        if (depend_tx) {
                chan = depend_tx->chan;
                device = chan->device;

                /* see if we can schedule an interrupt
                 * otherwise poll for completion
                 */
                if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                        device = NULL;

                tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
        } else
                tx = NULL;

        if (tx) {
                pr_debug("%s: (async)\n", __func__);

                async_tx_submit(chan, tx, submit);
        } else {
                pr_debug("%s: (sync)\n", __func__);

                /* wait for any prerequisite operations */
                async_tx_quiesce(&submit->depend_tx);

                async_tx_sync_epilog(submit);
        }

        return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
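
/*
 * Illustrative sketch (not part of the original file): a caller that wants a
 * callback to run once a previously submitted operation, and everything it
 * depends on, has completed.  The example_ names are hypothetical.
 */
static void example_chain_done(void *param)
{
        pr_debug("%s: dependency chain complete\n", __func__);
}

static void __maybe_unused
example_notify_when_done(struct dma_async_tx_descriptor *depend_tx)
{
        struct async_submit_ctl submit;

        init_async_submit(&submit, ASYNC_TX_ACK, depend_tx,
                          example_chain_done, NULL, NULL);
        async_trigger_callback(&submit);
}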

/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx - transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
        if (*tx) {
                /* if ack is already set then we cannot be sure
                 * we are referring to the correct operation
                 */
                BUG_ON(async_tx_test_ack(*tx));
                if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
                        panic("%s: DMA error waiting for transaction\n",
                              __func__);
                async_tx_ack(*tx);
                *tx = NULL;
        }
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
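
/*
 * Illustrative sketch (not part of the original file): the synchronous
 * fallback pattern used by the async_* helpers when no suitable channel is
 * available: quiesce the dependency, do the work on the CPU, then run the
 * completion callback through async_tx_sync_epilog().  The example_ name is
 * hypothetical.
 */
static void __maybe_unused
example_sync_fallback(void *dst, const void *src, size_t len,
                      struct async_submit_ctl *submit)
{
        /* wait for any prerequisite operations */
        async_tx_quiesce(&submit->depend_tx);

        memcpy(dst, src, len);

        async_tx_sync_epilog(submit);
}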

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");