/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden at runtime by writing to
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT 2000
#define HIDMA_ERR_INFO_SW 0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
#define HIDMA_NR_DEFAULT_DESC 10
#define HIDMA_MSI_INTS 11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
        return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
        return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
        return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct hidma_desc, desc);
}

static void hidma_free(struct hidma_dev *dmadev)
{
        INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

enum hidma_cap {
        HIDMA_MSI_CAP = 1,
        HIDMA_IDENTITY_CAP,
};

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
        struct dma_device *ddev = mchan->chan.device;
        struct hidma_dev *mdma = to_hidma_dev(ddev);
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t last_cookie;
        struct hidma_desc *mdesc;
        struct hidma_desc *next;
        unsigned long irqflags;
        struct list_head list;

        INIT_LIST_HEAD(&list);

        /* Get all completed descriptors */
        spin_lock_irqsave(&mchan->lock, irqflags);
        list_splice_tail_init(&mchan->completed, &list);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        /* Execute callbacks and run dependencies */
        list_for_each_entry_safe(mdesc, next, &list, node) {
                enum dma_status llstat;
                struct dmaengine_desc_callback cb;
                struct dmaengine_result result;

                desc = &mdesc->desc;
                last_cookie = desc->cookie;

                llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

                spin_lock_irqsave(&mchan->lock, irqflags);
                if (llstat == DMA_COMPLETE) {
                        mchan->last_success = last_cookie;
                        result.result = DMA_TRANS_NOERROR;
                } else {
                        result.result = DMA_TRANS_ABORTED;
                }
                dma_cookie_complete(desc);
                spin_unlock_irqrestore(&mchan->lock, irqflags);

                dmaengine_desc_get_callback(desc, &cb);

                dma_run_dependencies(desc);

                spin_lock_irqsave(&mchan->lock, irqflags);
                list_move(&mdesc->node, &mchan->free);
                spin_unlock_irqrestore(&mchan->lock, irqflags);

                dmaengine_desc_callback_invoke(&cb, &result);
        }
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
        struct hidma_desc *mdesc = data;
        struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
        struct dma_device *ddev = mchan->chan.device;
        struct hidma_dev *dmadev = to_hidma_dev(ddev);
        unsigned long irqflags;
        bool queued = false;

        spin_lock_irqsave(&mchan->lock, irqflags);
        if (mdesc->node.next) {
                /* Delete from the active list, add to completed list */
                list_move_tail(&mdesc->node, &mchan->completed);
                queued = true;

                /* calculate the next running descriptor */
                mchan->running = list_first_entry(&mchan->active,
                                                  struct hidma_desc, node);
        }
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        hidma_process_completed(mchan);

        if (queued) {
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
}

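/*
 * Each HIDMA device instance exposes a single dmaengine channel:
 * hidma_chan_init() is called exactly once from hidma_probe() with
 * dma_sig 0 and links the new channel into ddev->channels.
 */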
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
        struct hidma_chan *mchan;
        struct dma_device *ddev;

        mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
        if (!mchan)
                return -ENOMEM;

        ddev = &dmadev->ddev;
        mchan->dma_sig = dma_sig;
        mchan->dmadev = dmadev;
        mchan->chan.device = ddev;
        dma_cookie_init(&mchan->chan);

        INIT_LIST_HEAD(&mchan->free);
        INIT_LIST_HEAD(&mchan->prepared);
        INIT_LIST_HEAD(&mchan->active);
        INIT_LIST_HEAD(&mchan->completed);
        INIT_LIST_HEAD(&mchan->queued);
        spin_lock_init(&mchan->lock);
        list_add_tail(&mchan->chan.device_node, &ddev->channels);
        dmadev->ddev.chancnt++;
        return 0;
}

static void hidma_issue_task(struct tasklet_struct *t)
{
        struct hidma_dev *dmadev = from_tasklet(dmadev, t, task);

        pm_runtime_get_sync(dmadev->ddev.dev);
        hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *dmadev = mchan->dmadev;
        unsigned long flags;
        struct hidma_desc *qdesc, *next;
        int status;

        spin_lock_irqsave(&mchan->lock, flags);
        list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
                hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
                list_move_tail(&qdesc->node, &mchan->active);
        }

        if (!mchan->running) {
                struct hidma_desc *desc = list_first_entry(&mchan->active,
                                                           struct hidma_desc,
                                                           node);
                mchan->running = desc;
        }
        spin_unlock_irqrestore(&mchan->lock, flags);

        /* PM will be released in hidma_callback function. */
        status = pm_runtime_get(dmadev->ddev.dev);
        if (status < 0)
                tasklet_schedule(&dmadev->task);
        else
                hidma_ll_start(dmadev->lldev);
}

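/*
 * Cookies are assigned from a monotonically increasing, wrapping counter.
 * Any cookie in the half-open window (last_success, last_used] completed
 * without the channel recording it as a successful transfer (e.g. it was
 * aborted), so hidma_tx_status() reports DMA_ERROR for it; cookies outside
 * that window are reported as successful.
 *
 * Example (no wraparound): last_success = 5, last_used = 8. Cookies 6..8
 * map to DMA_ERROR, cookie 5 or older maps to success. The second branch
 * handles the case where the window itself wraps around the end of the
 * cookie space (last_success > last_used).
 */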
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
                dma_cookie_t last_success, dma_cookie_t last_used)
{
        if (last_success <= last_used) {
                if ((cookie <= last_success) || (cookie > last_used))
                        return true;
        } else {
                if ((cookie <= last_success) && (cookie > last_used))
                        return true;
        }
        return false;
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
                                       dma_cookie_t cookie,
                                       struct dma_tx_state *txstate)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        enum dma_status ret;

        ret = dma_cookie_status(dmach, cookie, txstate);
        if (ret == DMA_COMPLETE) {
                bool is_success;

                is_success = hidma_txn_is_success(cookie, mchan->last_success,
                                                  dmach->cookie);
                return is_success ? ret : DMA_ERROR;
        }

        if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
                unsigned long flags;
                dma_cookie_t runcookie;

                spin_lock_irqsave(&mchan->lock, flags);
                if (mchan->running)
                        runcookie = mchan->running->desc.cookie;
                else
                        runcookie = -EINVAL;

                if (runcookie == cookie)
                        ret = DMA_PAUSED;

                spin_unlock_irqrestore(&mchan->lock, flags);
        }

        return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct hidma_chan *mchan = to_hidma_chan(txd->chan);
        struct hidma_dev *dmadev = mchan->dmadev;
        struct hidma_desc *mdesc;
        unsigned long irqflags;
        dma_cookie_t cookie;

        pm_runtime_get_sync(dmadev->ddev.dev);
        if (!hidma_ll_isenabled(dmadev->lldev)) {
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
                return -ENODEV;
        }
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);

        mdesc = container_of(txd, struct hidma_desc, desc);
        spin_lock_irqsave(&mchan->lock, irqflags);

        /* Move descriptor to queued */
        list_move_tail(&mdesc->node, &mchan->queued);

        /* Update cookie */
        cookie = dma_cookie_assign(txd);

        spin_unlock_irqrestore(&mchan->lock, irqflags);

        return cookie;
}

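/*
 * Each channel pre-allocates a fixed pool of dmadev->nr_descriptors
 * descriptors, and every descriptor stays bound to one hardware TRE slot
 * for the lifetime of the channel. prep/submit/issue only move descriptors
 * between the free, prepared, queued, active and completed lists; no
 * memory is allocated on the hot path.
 */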
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *dmadev = mchan->dmadev;
        struct hidma_desc *mdesc, *tmp;
        unsigned long irqflags;
        LIST_HEAD(descs);
        unsigned int i;
        int rc = 0;

        if (mchan->allocated)
                return 0;

        /* Alloc descriptors for this channel */
        for (i = 0; i < dmadev->nr_descriptors; i++) {
                mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
                if (!mdesc) {
                        rc = -ENOMEM;
                        break;
                }
                dma_async_tx_descriptor_init(&mdesc->desc, dmach);
                mdesc->desc.tx_submit = hidma_tx_submit;

                rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
                                      "DMA engine", hidma_callback, mdesc,
                                      &mdesc->tre_ch);
                if (rc) {
                        dev_err(dmach->device->dev,
                                "channel alloc failed at %u\n", i);
                        kfree(mdesc);
                        break;
                }
                list_add_tail(&mdesc->node, &descs);
        }

        if (rc) {
                /* return the allocated descriptors */
                list_for_each_entry_safe(mdesc, tmp, &descs, node) {
                        hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
                        kfree(mdesc);
                }
                return rc;
        }

        spin_lock_irqsave(&mchan->lock, irqflags);
        list_splice_tail_init(&descs, &mchan->free);
        mchan->allocated = true;
        spin_unlock_irqrestore(&mchan->lock, irqflags);
        return 1;
}

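/*
 * The prep handlers below take a descriptor from the free pool and program
 * its TRE. If the pool is exhausted they return NULL so the caller can
 * retry later, once completion has recycled descriptors back to the pool.
 */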
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
                      size_t len, unsigned long flags)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_desc *mdesc = NULL;
        struct hidma_dev *mdma = mchan->dmadev;
        unsigned long irqflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, irqflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        if (!mdesc)
                return NULL;

        mdesc->desc.flags = flags;
        hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
                                     src, dest, len, flags,
                                     HIDMA_TRE_MEMCPY);

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, irqflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        return &mdesc->desc;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
                      size_t len, unsigned long flags)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_desc *mdesc = NULL;
        struct hidma_dev *mdma = mchan->dmadev;
        unsigned long irqflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, irqflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        if (!mdesc)
                return NULL;

        mdesc->desc.flags = flags;
        hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
                                     value, dest, len, flags,
                                     HIDMA_TRE_MEMSET);

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, irqflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        return &mdesc->desc;
}

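/*
 * Terminate sequence: drain anything already completed, detach every
 * descriptor from the channel lists under the lock, pause the hardware,
 * give each outstanding request back to its owner with a NULL result,
 * and finally re-enable the engine so the channel remains usable.
 */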
static int hidma_terminate_channel(struct dma_chan *chan)
{
        struct hidma_chan *mchan = to_hidma_chan(chan);
        struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
        struct hidma_desc *tmp, *mdesc;
        unsigned long irqflags;
        LIST_HEAD(list);
        int rc;

        pm_runtime_get_sync(dmadev->ddev.dev);
        /* give completed requests a chance to finish */
        hidma_process_completed(mchan);

        spin_lock_irqsave(&mchan->lock, irqflags);
        mchan->last_success = 0;
        list_splice_init(&mchan->active, &list);
        list_splice_init(&mchan->prepared, &list);
        list_splice_init(&mchan->completed, &list);
        list_splice_init(&mchan->queued, &list);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        /* this suspends the existing transfer */
        rc = hidma_ll_disable(dmadev->lldev);
        if (rc) {
                dev_err(dmadev->ddev.dev, "channel did not pause\n");
                goto out;
        }

        /* return all user requests */
        list_for_each_entry_safe(mdesc, tmp, &list, node) {
                struct dma_async_tx_descriptor *txd = &mdesc->desc;

                dma_descriptor_unmap(txd);
                dmaengine_desc_get_callback_invoke(txd, NULL);
                dma_run_dependencies(txd);

                /* move myself to free_list */
                list_move(&mdesc->node, &mchan->free);
        }

        rc = hidma_ll_enable(dmadev->lldev);
out:
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
        struct hidma_chan *mchan = to_hidma_chan(chan);
        struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
        int rc;

        rc = hidma_terminate_channel(chan);
        if (rc)
                return rc;

        /* reinitialize the hardware */
        pm_runtime_get_sync(dmadev->ddev.dev);
        rc = hidma_ll_setup(dmadev->lldev);
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *mdma = mchan->dmadev;
        struct hidma_desc *mdesc, *tmp;
        unsigned long irqflags;
        LIST_HEAD(descs);

        /* terminate running transactions and free descriptors */
        hidma_terminate_channel(dmach);

        spin_lock_irqsave(&mchan->lock, irqflags);

        /* Move data */
        list_splice_tail_init(&mchan->free, &descs);

        /* Free descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node) {
                hidma_ll_free(mdma->lldev, mdesc->tre_ch);
                list_del(&mdesc->node);
                kfree(mdesc);
        }

        mchan->allocated = false;
        spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
        struct hidma_chan *mchan;
        struct hidma_dev *dmadev;

        mchan = to_hidma_chan(chan);
        dmadev = to_hidma_dev(mchan->chan.device);
        if (!mchan->paused) {
                pm_runtime_get_sync(dmadev->ddev.dev);
                if (hidma_ll_disable(dmadev->lldev))
                        dev_warn(dmadev->ddev.dev, "channel did not stop\n");
                mchan->paused = true;
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
        return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
        struct hidma_chan *mchan;
        struct hidma_dev *dmadev;
        int rc = 0;

        mchan = to_hidma_chan(chan);
        dmadev = to_hidma_dev(mchan->chan.device);
        if (mchan->paused) {
                pm_runtime_get_sync(dmadev->ddev.dev);
                rc = hidma_ll_enable(dmadev->lldev);
                if (!rc)
                        mchan->paused = false;
                else
                        dev_err(dmadev->ddev.dev,
                                "failed to resume the channel");
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
        return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
        struct hidma_lldev *lldev = arg;

        /*
         * All interrupts are request driven.
         * HW doesn't send an interrupt by itself.
         */
        return hidma_ll_inthandler(chirq, lldev);
}

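/*
 * In MSI mode each of the HIDMA_MSI_INTS vectors maps to one cause bit.
 * The handler recovers the vector index from the virq number (relative to
 * msi_virqbase, the virq of MSI index 0) and passes the corresponding bit
 * mask to the low-level interrupt handler.
 */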
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
        struct hidma_lldev **lldevp = arg;
        struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

        return hidma_ll_inthandler_msi(chirq, *lldevp,
                                       1 << (chirq - dmadev->msi_virqbase));
}
#endif

static ssize_t hidma_show_values(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct hidma_dev *mdev = dev_get_drvdata(dev);

        buf[0] = 0;

        if (strcmp(attr->attr.name, "chid") == 0)
                sprintf(buf, "%d\n", mdev->chidx);

        return strlen(buf);
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
        device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
        struct device_attribute *attrs;
        char *name_copy;

        attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
                             GFP_KERNEL);
        if (!attrs)
                return NULL;

        name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
        if (!name_copy)
                return NULL;

        attrs->attr.name = name_copy;
        attrs->attr.mode = mode;
        attrs->show = hidma_show_values;
        sysfs_attr_init(&attrs->attr);

        return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
        dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
        if (!dev->chid_attrs)
                return -ENOMEM;

        return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

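/*
 * MSI message write-back: only the message of MSI index 0 is programmed
 * into the EVCA registers at offsets 0x118 (address low), 0x11C (address
 * high) and 0x120 (data); the remaining vectors of the block are not
 * written back to the hardware here.
 */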
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
        struct device *dev = msi_desc_to_dev(desc);
        struct hidma_dev *dmadev = dev_get_drvdata(dev);

        if (!desc->platform.msi_index) {
                writel(msg->address_lo, dmadev->dev_evca + 0x118);
                writel(msg->address_hi, dmadev->dev_evca + 0x11C);
                writel(msg->data, dmadev->dev_evca + 0x120);
        }
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
        struct device *dev = dmadev->ddev.dev;
        struct msi_desc *desc;

        /* free allocated MSI interrupts above */
        for_each_msi_entry(desc, dev)
                devm_free_irq(dev, desc->irq, &dmadev->lldev);

        platform_msi_domain_free_irqs(dev);
#endif
}

static int hidma_request_msi(struct hidma_dev *dmadev,
                             struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
        int rc;
        struct msi_desc *desc;
        struct msi_desc *failed_desc = NULL;

        rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
                                            hidma_write_msi_msg);
        if (rc)
                return rc;

        for_each_msi_entry(desc, &pdev->dev) {
                if (!desc->platform.msi_index)
                        dmadev->msi_virqbase = desc->irq;

                rc = devm_request_irq(&pdev->dev, desc->irq,
                                      hidma_chirq_handler_msi,
                                      0, "qcom-hidma-msi",
                                      &dmadev->lldev);
                if (rc) {
                        failed_desc = desc;
                        break;
                }
        }

        if (rc) {
                /* free allocated MSI interrupts above */
                for_each_msi_entry(desc, &pdev->dev) {
                        if (desc == failed_desc)
                                break;
                        devm_free_irq(&pdev->dev, desc->irq,
                                      &dmadev->lldev);
                }
        } else {
                /* Add callback to free MSIs on teardown */
                hidma_ll_setup_irq(dmadev->lldev, true);
        }

        if (rc)
                dev_warn(&pdev->dev,
                         "failed to request MSI irq, falling back to wired IRQ\n");
        return rc;
#else
        return -EINVAL;
#endif
}

static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
        enum hidma_cap cap;

        cap = (enum hidma_cap) device_get_match_data(dev);
        return cap ? ((cap & test_cap) > 0) : 0;
}

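/*
 * Probe order: enable runtime PM with autosuspend, map the TRCA and EVCA
 * register regions, grab the channel IRQ, determine the descriptor count
 * ("desc-count" property, overridable by the nr_desc_prm module parameter,
 * falling back to HIDMA_NR_DEFAULT_DESC), initialize the low-level engine,
 * then wire up either MSI or the wired IRQ, create the single channel and
 * register with the dmaengine core.
 */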
static int hidma_probe(struct platform_device *pdev)
{
        struct hidma_dev *dmadev;
        struct resource *trca_resource;
        struct resource *evca_resource;
        int chirq;
        void __iomem *evca;
        void __iomem *trca;
        int rc;
        bool msi;

        pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        trca = devm_ioremap_resource(&pdev->dev, trca_resource);
        if (IS_ERR(trca)) {
                rc = -ENOMEM;
                goto bailout;
        }

        evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        evca = devm_ioremap_resource(&pdev->dev, evca_resource);
        if (IS_ERR(evca)) {
                rc = -ENOMEM;
                goto bailout;
        }

        /*
         * This driver only handles the channel IRQs.
         * Common IRQ is handled by the management driver.
         */
        chirq = platform_get_irq(pdev, 0);
        if (chirq < 0) {
                rc = -ENODEV;
                goto bailout;
        }

        dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
        if (!dmadev) {
                rc = -ENOMEM;
                goto bailout;
        }

        INIT_LIST_HEAD(&dmadev->ddev.channels);
        spin_lock_init(&dmadev->lock);
        dmadev->ddev.dev = &pdev->dev;
        pm_runtime_get_sync(dmadev->ddev.dev);

        dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
        dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
        if (WARN_ON(!pdev->dev.dma_mask)) {
                rc = -ENXIO;
                goto dmafree;
        }

        dmadev->dev_evca = evca;
        dmadev->evca_resource = evca_resource;
        dmadev->dev_trca = trca;
        dmadev->trca_resource = trca_resource;
        dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
        dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
        dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
        dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
        dmadev->ddev.device_tx_status = hidma_tx_status;
        dmadev->ddev.device_issue_pending = hidma_issue_pending;
        dmadev->ddev.device_pause = hidma_pause;
        dmadev->ddev.device_resume = hidma_resume;
        dmadev->ddev.device_terminate_all = hidma_terminate_all;
        dmadev->ddev.copy_align = 8;

        /*
         * Determine the MSI capability of the platform. Old HW doesn't
         * support MSI.
         */
        msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
        device_property_read_u32(&pdev->dev, "desc-count",
                                 &dmadev->nr_descriptors);

        if (nr_desc_prm) {
                dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
                         nr_desc_prm);
                dmadev->nr_descriptors = nr_desc_prm;
        }

        if (!dmadev->nr_descriptors)
                dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

        if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
                dmadev->chidx = readl(dmadev->dev_trca + 0x40);
        else
                dmadev->chidx = readl(dmadev->dev_trca + 0x28);

        /* Set DMA mask to 64 bits. */
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc) {
                dev_warn(&pdev->dev, "unable to set coherent mask to 64");
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (rc)
                        goto dmafree;
        }

        dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
                                      dmadev->nr_descriptors, dmadev->dev_trca,
                                      dmadev->dev_evca, dmadev->chidx);
        if (!dmadev->lldev) {
                rc = -EPROBE_DEFER;
                goto dmafree;
        }

        platform_set_drvdata(pdev, dmadev);
        if (msi)
                rc = hidma_request_msi(dmadev, pdev);

        if (!msi || rc) {
                hidma_ll_setup_irq(dmadev->lldev, false);
                rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
                                      0, "qcom-hidma", dmadev->lldev);
                if (rc)
                        goto uninit;
        }

        INIT_LIST_HEAD(&dmadev->ddev.channels);
        rc = hidma_chan_init(dmadev, 0);
        if (rc)
                goto uninit;

        rc = dma_async_device_register(&dmadev->ddev);
        if (rc)
                goto uninit;

        dmadev->irq = chirq;
        tasklet_setup(&dmadev->task, hidma_issue_task);
        hidma_debug_init(dmadev);
        hidma_sysfs_init(dmadev);
        dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return 0;

uninit:
        if (msi)
                hidma_free_msis(dmadev);

        hidma_ll_uninit(dmadev->lldev);
dmafree:
        if (dmadev)
                hidma_free(dmadev);
bailout:
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return rc;
}

static void hidma_shutdown(struct platform_device *pdev)
{
        struct hidma_dev *dmadev = platform_get_drvdata(pdev);

        dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

        pm_runtime_get_sync(dmadev->ddev.dev);
        if (hidma_ll_disable(dmadev->lldev))
                dev_warn(dmadev->ddev.dev, "channel did not stop\n");
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
}

static int hidma_remove(struct platform_device *pdev)
{
        struct hidma_dev *dmadev = platform_get_drvdata(pdev);

        pm_runtime_get_sync(dmadev->ddev.dev);
        dma_async_device_unregister(&dmadev->ddev);
        if (!dmadev->lldev->msi_support)
                devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
        else
                hidma_free_msis(dmadev);

        tasklet_kill(&dmadev->task);
        hidma_sysfs_uninit(dmadev);
        hidma_debug_uninit(dmadev);
        hidma_ll_uninit(dmadev->lldev);
        hidma_free(dmadev);

        dev_info(&pdev->dev, "HI-DMA engine removed\n");
        pm_runtime_put_sync_suspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
        {"QCOM8061"},
        {"QCOM8062", HIDMA_MSI_CAP},
        {"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
        {},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
        {.compatible = "qcom,hidma-1.0",},
        {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
        {.compatible = "qcom,hidma-1.2",
         .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
        {},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
        .probe = hidma_probe,
        .remove = hidma_remove,
        .shutdown = hidma_shutdown,
        .driver = {
                   .name = "hidma",
                   .of_match_table = hidma_match,
                   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
                   },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");