sandbox-dma-test.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Direct Memory Access U-Class Simulation driver
 *
 * Copyright (C) 2018 Texas Instruments Incorporated <www.ti.com>
 *
 * Author: Grygorii Strashko <grygorii.strashko@ti.com>
 */

#include <common.h>
#include <dm.h>
#include <dm/read.h>
#include <dma-uclass.h>
#include <dt-structs.h>
#include <errno.h>

#define SANDBOX_DMA_CH_CNT	3
#define SANDBOX_DMA_BUF_SIZE	1024

struct sandbox_dma_chan {
	struct sandbox_dma_dev *ud;
	char name[20];
	u32 id;
	enum dma_direction dir;
	bool in_use;
	bool enabled;
};

struct sandbox_dma_dev {
	struct device *dev;
	u32 ch_count;
	struct sandbox_dma_chan channels[SANDBOX_DMA_CH_CNT];
	uchar buf[SANDBOX_DMA_BUF_SIZE];
	uchar *buf_rx;
	size_t data_len;
	u32 meta;
};
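
/*
 * .transfer op: a MEM_TO_MEM "DMA" is modelled as a plain memcpy() from
 * src to dst; there is no hardware to program in the sandbox.
 */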
static int sandbox_dma_transfer(struct udevice *dev, int direction,
				void *dst, void *src, size_t len)
{
	memcpy(dst, src, len);

	return 0;
}
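
/*
 * .of_xlate op: translate the cell from a consumer's "dmas" property into
 * a channel id and give each channel a fixed direction:
 * 0 = MEM_TO_MEM, 1 = MEM_TO_DEV, 2 = DEV_TO_MEM.
 */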
static int sandbox_dma_of_xlate(struct dma *dma,
				struct ofnode_phandle_args *args)
{
	struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
	struct sandbox_dma_chan *uc;

	debug("%s(dma id=%u)\n", __func__, args->args[0]);

	if (args->args[0] >= SANDBOX_DMA_CH_CNT)
		return -EINVAL;

	dma->id = args->args[0];
	uc = &ud->channels[dma->id];

	if (dma->id == 1)
		uc->dir = DMA_MEM_TO_DEV;
	else if (dma->id == 2)
		uc->dir = DMA_DEV_TO_MEM;
	else
		uc->dir = DMA_MEM_TO_MEM;

	debug("%s(dma id=%lu dir=%d)\n", __func__, dma->id, uc->dir);

	return 0;
}
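
/* .request op: mark the selected channel as owned by the caller */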
static int sandbox_dma_request(struct dma *dma)
{
	struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
	struct sandbox_dma_chan *uc;

	if (dma->id >= SANDBOX_DMA_CH_CNT)
		return -EINVAL;

	uc = &ud->channels[dma->id];
	if (uc->in_use)
		return -EBUSY;

	uc->in_use = true;
	debug("%s(dma id=%lu in_use=%d)\n", __func__, dma->id, uc->in_use);

	return 0;
}
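
/* .rfree op: release the channel and drop any pending receive state */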
static int sandbox_dma_rfree(struct dma *dma)
{
	struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
	struct sandbox_dma_chan *uc;

	if (dma->id >= SANDBOX_DMA_CH_CNT)
		return -EINVAL;

	uc = &ud->channels[dma->id];
	if (!uc->in_use)
		return -EINVAL;

	uc->in_use = false;
	ud->buf_rx = NULL;
	ud->data_len = 0;
	debug("%s(dma id=%lu in_use=%d)\n", __func__, dma->id, uc->in_use);

	return 0;
}
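
/*
 * .enable/.disable ops: plain state tracking; send/receive refuse to run
 * on a channel that is not both requested and enabled.
 */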
static int sandbox_dma_enable(struct dma *dma)
{
	struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
	struct sandbox_dma_chan *uc;

	if (dma->id >= SANDBOX_DMA_CH_CNT)
		return -EINVAL;

	uc = &ud->channels[dma->id];
	if (!uc->in_use)
		return -EINVAL;
	if (uc->enabled)
		return -EINVAL;

	uc->enabled = true;
	debug("%s(dma id=%lu enabled=%d)\n", __func__, dma->id, uc->enabled);

	return 0;
}

static int sandbox_dma_disable(struct dma *dma)
{
	struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
	struct sandbox_dma_chan *uc;

	if (dma->id >= SANDBOX_DMA_CH_CNT)
		return -EINVAL;

	uc = &ud->channels[dma->id];
	if (!uc->in_use)
		return -EINVAL;
	if (!uc->enabled)
		return -EINVAL;

	uc->enabled = false;
	debug("%s(dma id=%lu enabled=%d)\n", __func__, dma->id, uc->enabled);

	return 0;
}
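
/*
 * .send op: only valid on the MEM_TO_DEV channel. The "device" end is
 * emulated by copying the payload and a 32-bit metadata word into the
 * driver's internal buffer, where a later receive can pick them up.
 */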
static int sandbox_dma_send(struct dma *dma,
			    void *src, size_t len, void *metadata)
{
	struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
	struct sandbox_dma_chan *uc;

	if (dma->id >= SANDBOX_DMA_CH_CNT)
		return -EINVAL;
	if (!src || !metadata)
		return -EINVAL;

	debug("%s(dma id=%lu)\n", __func__, dma->id);

	uc = &ud->channels[dma->id];
	if (uc->dir != DMA_MEM_TO_DEV)
		return -EINVAL;
	if (!uc->in_use)
		return -EINVAL;
	if (!uc->enabled)
		return -EINVAL;
	if (len >= SANDBOX_DMA_BUF_SIZE)
		return -EINVAL;

	memcpy(ud->buf, src, len);
	ud->data_len = len;
	ud->meta = *((u32 *)metadata);

	debug("%s(dma id=%lu len=%zu meta=%08x)\n",
	      __func__, dma->id, len, ud->meta);

	return 0;
}
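
/*
 * .receive op: only valid on the DEV_TO_MEM channel. Hands back whatever
 * sandbox_dma_send() queued last: into the buffer registered via
 * .prepare_rcv_buf if one is set, otherwise into the caller's buffer.
 * Returns the number of bytes delivered.
 */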
static int sandbox_dma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);
	struct sandbox_dma_chan *uc;

	if (dma->id >= SANDBOX_DMA_CH_CNT)
		return -EINVAL;
	if (!dst || !metadata)
		return -EINVAL;

	uc = &ud->channels[dma->id];
	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->in_use)
		return -EINVAL;
	if (!uc->enabled)
		return -EINVAL;
	if (!ud->data_len)
		return 0;

	if (ud->buf_rx) {
		memcpy(ud->buf_rx, ud->buf, ud->data_len);
		*dst = ud->buf_rx;
	} else {
		memcpy(*dst, ud->buf, ud->data_len);
	}

	*((u32 *)metadata) = ud->meta;

	debug("%s(dma id=%lu len=%zu meta=%08x %p)\n",
	      __func__, dma->id, ud->data_len, ud->meta, *dst);

	return ud->data_len;
}
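
/* .prepare_rcv_buf op: remember the buffer to use for the next receive */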
static int sandbox_dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct sandbox_dma_dev *ud = dev_get_priv(dma->dev);

	ud->buf_rx = dst;

	return 0;
}

static const struct dma_ops sandbox_dma_ops = {
	.transfer	= sandbox_dma_transfer,
	.of_xlate	= sandbox_dma_of_xlate,
	.request	= sandbox_dma_request,
	.rfree		= sandbox_dma_rfree,
	.enable		= sandbox_dma_enable,
	.disable	= sandbox_dma_disable,
	.send		= sandbox_dma_send,
	.receive	= sandbox_dma_receive,
	.prepare_rcv_buf = sandbox_dma_prepare_rcv_buf,
};
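
/*
 * Probe: advertise MEM_TO_MEM, MEM_TO_DEV and DEV_TO_MEM support to the
 * uclass and reset every channel to an unused, disabled state.
 */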
static int sandbox_dma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct sandbox_dma_dev *ud = dev_get_priv(dev);
	int i, ret = 0;

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM |
			     DMA_SUPPORTS_MEM_TO_DEV |
			     DMA_SUPPORTS_DEV_TO_MEM;

	ud->ch_count = SANDBOX_DMA_CH_CNT;
	ud->buf_rx = NULL;
	ud->meta = 0;
	ud->data_len = 0;

	pr_err("Number of channels: %u\n", ud->ch_count);

	for (i = 0; i < ud->ch_count; i++) {
		struct sandbox_dma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		sprintf(uc->name, "DMA chan%d\n", i);
		uc->in_use = false;
		uc->enabled = false;
	}

	return ret;
}

static const struct udevice_id sandbox_dma_ids[] = {
	{ .compatible = "sandbox,dma" },
	{ }
};

U_BOOT_DRIVER(sandbox_dma) = {
	.name	= "sandbox-dma",
	.id	= UCLASS_DMA,
	.of_match = sandbox_dma_ids,
	.ops	= &sandbox_dma_ops,
	.probe	= sandbox_dma_probe,
	.priv_auto_alloc_size = sizeof(struct sandbox_dma_dev),
};
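
A caller is expected to drive this driver through the generic DMA uclass helpers rather than invoking the ops directly. The sketch below is a minimal loopback example, not part of the driver: it assumes the dma_get_by_name(), dma_enable(), dma_prepare_rcv_buf(), dma_send(), dma_receive(), dma_disable() and dma_free() helpers from include/dma.h, and a sandbox device tree that exposes channel 1 as "tx0" and channel 2 as "rx0" (as the sandbox DM test does); the function name and channel names are illustrative assumptions.

/* Hypothetical client sketch: push a buffer through the sandbox TX/RX pair */
#include <common.h>
#include <dma.h>

static int sandbox_dma_loopback_example(struct udevice *dev)
{
	struct dma dma_tx, dma_rx;
	u8 src[64], dst[64];
	u32 meta_tx = 0xadadadad, meta_rx = 0;
	void *dst_ptr = dst;
	int i, ret;

	for (i = 0; i < sizeof(src); i++)
		src[i] = i;

	/* Look up channels by the names assumed to exist in the sandbox DT */
	ret = dma_get_by_name(dev, "tx0", &dma_tx);
	if (ret)
		return ret;
	ret = dma_get_by_name(dev, "rx0", &dma_rx);
	if (ret)
		return ret;

	ret = dma_enable(&dma_tx);
	if (ret)
		return ret;
	ret = dma_enable(&dma_rx);
	if (ret)
		return ret;

	/* Register a receive buffer, then push data through the fake device */
	dma_prepare_rcv_buf(&dma_rx, dst, sizeof(dst));
	ret = dma_send(&dma_tx, src, sizeof(src), &meta_tx);
	if (ret)
		return ret;

	/* receive() returns the number of bytes handed back */
	ret = dma_receive(&dma_rx, &dst_ptr, &meta_rx);
	if (ret != sizeof(src) || meta_rx != meta_tx)
		return -EIO;

	dma_disable(&dma_tx);
	dma_disable(&dma_rx);
	dma_free(&dma_tx);
	dma_free(&dma_rx);

	return 0;
}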