// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected to
 * a dedicated bus which is directly connected to a data port on a peripheral.
 * Given that those are configuration options of the core that are selected when
 * it is instantiated, they can not be changed by software at runtime. By
 * extension this means that each channel is uni-directional. It can either be
 * device to memory or memory to device, but not both. Also, since the device
 * side is a dedicated data bus only connected to a single peripheral, there is
 * no address that can or needs to be configured for the device side.
 */
#define AXI_DMAC_REG_INTERFACE_DESC 0x10
#define AXI_DMAC_DMA_SRC_TYPE_MSK GENMASK(13, 12)
#define AXI_DMAC_DMA_SRC_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
#define AXI_DMAC_DMA_SRC_WIDTH_MSK GENMASK(11, 8)
#define AXI_DMAC_DMA_SRC_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
#define AXI_DMAC_DMA_DST_TYPE_MSK GENMASK(5, 4)
#define AXI_DMAC_DMA_DST_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define AXI_DMAC_DMA_DST_WIDTH_MSK GENMASK(3, 0)
#define AXI_DMAC_DMA_DST_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)

#define AXI_DMAC_REG_IRQ_MASK 0x80
#define AXI_DMAC_REG_IRQ_PENDING 0x84
#define AXI_DMAC_REG_IRQ_SOURCE 0x88

#define AXI_DMAC_REG_CTRL 0x400
#define AXI_DMAC_REG_TRANSFER_ID 0x404
#define AXI_DMAC_REG_START_TRANSFER 0x408
#define AXI_DMAC_REG_FLAGS 0x40c
#define AXI_DMAC_REG_DEST_ADDRESS 0x410
#define AXI_DMAC_REG_SRC_ADDRESS 0x414
#define AXI_DMAC_REG_X_LENGTH 0x418
#define AXI_DMAC_REG_Y_LENGTH 0x41c
#define AXI_DMAC_REG_DEST_STRIDE 0x420
#define AXI_DMAC_REG_SRC_STRIDE 0x424
#define AXI_DMAC_REG_TRANSFER_DONE 0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
#define AXI_DMAC_REG_STATUS 0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR 0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450

#define AXI_DMAC_CTRL_ENABLE BIT(0)
#define AXI_DMAC_CTRL_PAUSE BIT(1)

#define AXI_DMAC_IRQ_SOT BIT(0)
#define AXI_DMAC_IRQ_EOT BIT(1)

#define AXI_DMAC_FLAG_CYCLIC BIT(0)
#define AXI_DMAC_FLAG_LAST BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U
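
/*
 * One hardware scatter-gather segment: the addresses, lengths and strides
 * that get programmed into the transfer registers, plus the transfer ID
 * assigned by the hardware once the segment has been submitted.
 */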
struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}
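
/*
 * Program the next unsubmitted segment of the active (or next pending)
 * descriptor into the transfer registers and kick it off. Called with the
 * channel's vchan lock held.
 */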
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
	    desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return sg->x_len * sg->y_len;
	else
		return sg->x_len;
}
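
/*
 * Drain the hardware's partial-transfer reporting and record the reported
 * length against the matching in-flight segment, so the residue can be
 * computed once that segment completes.
 */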
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				"Not found partial segment id=%u, len=%u\n",
				id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}
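
/*
 * Walk the active descriptor's segments and retire every segment whose ID is
 * set in the EOT completion mask. Returns true if a new transfer should be
 * started because a previously blocked cyclic segment became free.
 */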
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}
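
/*
 * Interrupt handler: acknowledge the pending interrupts, complete finished
 * segments on an end-of-transfer and submit the next segment when the
 * hardware signals that its transfer queue has room again (start-of-transfer).
 */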
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}
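
/*
 * Fill a run of scatter-gather entries for a contiguous buffer, splitting
 * each period into equally sized segments that respect the controller's
 * maximum transfer length and length alignment. Returns a pointer past the
 * last entry that was filled in.
 */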
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
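
/*
 * Interleaved transfers map onto the core's optional 2D mode (x_len/y_len
 * plus a per-frame stride). Without 2D support only a set of contiguous
 * frames with zero inter-chunk gaps can be expressed, which is then
 * programmed as a single linear segment.
 */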
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};
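
/*
 * Derive the channel parameters that follow from the interface configuration:
 * the address alignment required by the wider of the two bus interfaces and
 * the transfer direction implied by which side is memory-mapped.
 */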
static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
{
	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave. See the illustrative
 * fragment after this function.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}
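
/*
 * An illustrative (not normative) devicetree fragment using the properties
 * parsed above; the unit address, reg, interrupt and clock values are
 * placeholders:
 *
 *	dma-controller@7c420000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		reg = <0x7c420000 0x1000>;
 *		interrupts = <0 57 0>;
 *		clocks = <&clkc 16>;
 *		#dma-cells = <1>;
 *
 *		adi,channels {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,source-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *				adi,destination-bus-width = <64>;
 *			};
 *		};
 *	};
 */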

static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
{
	struct device_node *of_channels, *of_chan;
	int ret;

	of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	return 0;
}

static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int val, desc;

	desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
	if (desc == 0) {
		dev_err(dev, "DMA interface register reads zero\n");
		return -EFAULT;
	}

	val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid source bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->src_type = val;

	val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid destination bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->dest_type = val;

	val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Source bus width is zero\n");
		return -EINVAL;
	}
	/* widths are stored in log2 */
	chan->src_width = 1 << val;

	val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Destination bus width is zero\n");
		return -EINVAL;
	}
	chan->dest_width = 1 << val;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}
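
/*
 * Detect which optional features this instance of the core was synthesized
 * with by writing registers and reading back what sticks: cyclic support,
 * 2D support, the maximum transfer length and the required length alignment.
 */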
static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
{
	struct axi_dmac_chan *chan = &dmac->chan;

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	struct regmap *regmap;
	unsigned int version;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
		ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
	else
		ret = axi_dmac_parse_dt(&pdev->dev, dmac);

	if (ret < 0)
		goto err_clk_disable;

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = axi_dmac_detect_caps(dmac, version);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
		&axi_dmac_regmap_config);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dmac->irq, dmac);
err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");