  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
  4. */
  5. /*
  6. * This code implements the DMA subsystem. It provides a HW-neutral interface
  7. * for other kernel code to use asynchronous memory copy capabilities,
  8. * if present, and allows different HW DMA drivers to register as providing
  9. * this capability.
  10. *
  11. * Because we are accelerating what is already a relatively fast operation,
  12. * the code goes to great lengths to avoid additional overhead, such as
  13. * locking.
  14. *
  15. * LOCKING:
  16. *
  17. * The subsystem keeps a global list of dma_device structs; it is protected by
  18. * a mutex, dma_list_mutex.
  19. *
  20. * A subsystem can get access to a channel by calling dmaengine_get() followed
  21. * by dma_find_channel(), or if it has need for an exclusive channel it can call
  22. * dma_request_channel(). Once a channel is allocated a reference is taken
  23. * against its corresponding driver to disable removal.
  24. *
  25. * Each device has a channels list, which runs unlocked but is never modified
  26. * once the device is registered; it is just set up by the driver.
  27. *
  28. * See Documentation/driver-api/dmaengine for more details
  29. */
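/*
 * Example (illustrative sketch): the opportunistic, non-exclusive client model
 * described above. The DMA_MEMCPY capability and the work done with the
 * channel are placeholders; error handling is elided.
 *
 *	struct dma_chan *chan;
 *
 *	dmaengine_get();			// register interest in channels
 *	chan = dma_find_channel(DMA_MEMCPY);	// per-CPU lookup, may be NULL
 *	if (chan) {
 *		// prep and submit descriptors on chan, then kick the engine
 *		dma_async_issue_pending(chan);
 *	}
 *	dmaengine_put();			// drop the subsystem reference
 */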
  30. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  31. #include <linux/platform_device.h>
  32. #include <linux/dma-mapping.h>
  33. #include <linux/init.h>
  34. #include <linux/module.h>
  35. #include <linux/mm.h>
  36. #include <linux/device.h>
  37. #include <linux/dmaengine.h>
  38. #include <linux/hardirq.h>
  39. #include <linux/spinlock.h>
  40. #include <linux/percpu.h>
  41. #include <linux/rcupdate.h>
  42. #include <linux/mutex.h>
  43. #include <linux/jiffies.h>
  44. #include <linux/rculist.h>
  45. #include <linux/idr.h>
  46. #include <linux/slab.h>
  47. #include <linux/acpi.h>
  48. #include <linux/acpi_dma.h>
  49. #include <linux/of_dma.h>
  50. #include <linux/mempool.h>
  51. #include <linux/numa.h>
  52. #include "dmaengine.h"
  53. static DEFINE_MUTEX(dma_list_mutex);
  54. static DEFINE_IDA(dma_ida);
  55. static LIST_HEAD(dma_device_list);
  56. static long dmaengine_ref_count;
  57. /* --- debugfs implementation --- */
  58. #ifdef CONFIG_DEBUG_FS
  59. #include <linux/debugfs.h>
  60. static struct dentry *rootdir;
  61. static void dmaengine_debug_register(struct dma_device *dma_dev)
  62. {
  63. dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
  64. rootdir);
  65. if (IS_ERR(dma_dev->dbg_dev_root))
  66. dma_dev->dbg_dev_root = NULL;
  67. }
  68. static void dmaengine_debug_unregister(struct dma_device *dma_dev)
  69. {
  70. debugfs_remove_recursive(dma_dev->dbg_dev_root);
  71. dma_dev->dbg_dev_root = NULL;
  72. }
  73. static void dmaengine_dbg_summary_show(struct seq_file *s,
  74. struct dma_device *dma_dev)
  75. {
  76. struct dma_chan *chan;
  77. list_for_each_entry(chan, &dma_dev->channels, device_node) {
  78. if (chan->client_count) {
  79. seq_printf(s, " %-13s| %s", dma_chan_name(chan),
  80. chan->dbg_client_name ?: "in-use");
  81. if (chan->router)
  82. seq_printf(s, " (via router: %s)\n",
  83. dev_name(chan->router->dev));
  84. else
  85. seq_puts(s, "\n");
  86. }
  87. }
  88. }
  89. static int dmaengine_summary_show(struct seq_file *s, void *data)
  90. {
  91. struct dma_device *dma_dev = NULL;
  92. mutex_lock(&dma_list_mutex);
  93. list_for_each_entry(dma_dev, &dma_device_list, global_node) {
  94. seq_printf(s, "dma%d (%s): number of channels: %u\n",
  95. dma_dev->dev_id, dev_name(dma_dev->dev),
  96. dma_dev->chancnt);
  97. if (dma_dev->dbg_summary_show)
  98. dma_dev->dbg_summary_show(s, dma_dev);
  99. else
  100. dmaengine_dbg_summary_show(s, dma_dev);
  101. if (!list_is_last(&dma_dev->global_node, &dma_device_list))
  102. seq_puts(s, "\n");
  103. }
  104. mutex_unlock(&dma_list_mutex);
  105. return 0;
  106. }
  107. DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
  108. static void __init dmaengine_debugfs_init(void)
  109. {
  110. rootdir = debugfs_create_dir("dmaengine", NULL);
  111. /* /sys/kernel/debug/dmaengine/summary */
  112. debugfs_create_file("summary", 0444, rootdir, NULL,
  113. &dmaengine_summary_fops);
  114. }
  115. #else
  116. static inline void dmaengine_debugfs_init(void) { }
  117. static inline int dmaengine_debug_register(struct dma_device *dma_dev)
  118. {
  119. return 0;
  120. }
  121. static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
  122. #endif /* CONFIG_DEBUG_FS */
  123. /* --- sysfs implementation --- */
  124. #define DMA_SLAVE_NAME "slave"
  125. /**
  126. * dev_to_dma_chan - convert a device pointer to its sysfs container object
  127. * @dev: device node
  128. *
  129. * Must be called under dma_list_mutex.
  130. */
  131. static struct dma_chan *dev_to_dma_chan(struct device *dev)
  132. {
  133. struct dma_chan_dev *chan_dev;
  134. chan_dev = container_of(dev, typeof(*chan_dev), device);
  135. return chan_dev->chan;
  136. }
  137. static ssize_t memcpy_count_show(struct device *dev,
  138. struct device_attribute *attr, char *buf)
  139. {
  140. struct dma_chan *chan;
  141. unsigned long count = 0;
  142. int i;
  143. int err;
  144. mutex_lock(&dma_list_mutex);
  145. chan = dev_to_dma_chan(dev);
  146. if (chan) {
  147. for_each_possible_cpu(i)
  148. count += per_cpu_ptr(chan->local, i)->memcpy_count;
  149. err = sprintf(buf, "%lu\n", count);
  150. } else
  151. err = -ENODEV;
  152. mutex_unlock(&dma_list_mutex);
  153. return err;
  154. }
  155. static DEVICE_ATTR_RO(memcpy_count);
  156. static ssize_t bytes_transferred_show(struct device *dev,
  157. struct device_attribute *attr, char *buf)
  158. {
  159. struct dma_chan *chan;
  160. unsigned long count = 0;
  161. int i;
  162. int err;
  163. mutex_lock(&dma_list_mutex);
  164. chan = dev_to_dma_chan(dev);
  165. if (chan) {
  166. for_each_possible_cpu(i)
  167. count += per_cpu_ptr(chan->local, i)->bytes_transferred;
  168. err = sprintf(buf, "%lu\n", count);
  169. } else
  170. err = -ENODEV;
  171. mutex_unlock(&dma_list_mutex);
  172. return err;
  173. }
  174. static DEVICE_ATTR_RO(bytes_transferred);
  175. static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
  176. char *buf)
  177. {
  178. struct dma_chan *chan;
  179. int err;
  180. mutex_lock(&dma_list_mutex);
  181. chan = dev_to_dma_chan(dev);
  182. if (chan)
  183. err = sprintf(buf, "%d\n", chan->client_count);
  184. else
  185. err = -ENODEV;
  186. mutex_unlock(&dma_list_mutex);
  187. return err;
  188. }
  189. static DEVICE_ATTR_RO(in_use);
  190. static struct attribute *dma_dev_attrs[] = {
  191. &dev_attr_memcpy_count.attr,
  192. &dev_attr_bytes_transferred.attr,
  193. &dev_attr_in_use.attr,
  194. NULL,
  195. };
  196. ATTRIBUTE_GROUPS(dma_dev);
  197. static void chan_dev_release(struct device *dev)
  198. {
  199. struct dma_chan_dev *chan_dev;
  200. chan_dev = container_of(dev, typeof(*chan_dev), device);
  201. kfree(chan_dev);
  202. }
  203. static struct class dma_devclass = {
  204. .name = "dma",
  205. .dev_groups = dma_dev_groups,
  206. .dev_release = chan_dev_release,
  207. };
  208. /* --- client and device registration --- */
  209. /* enable iteration over all operation types */
  210. static dma_cap_mask_t dma_cap_mask_all;
  211. /**
  212. * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
  213. * @chan: associated channel for this entry
  214. */
  215. struct dma_chan_tbl_ent {
  216. struct dma_chan *chan;
  217. };
  218. /* percpu lookup table for memory-to-memory offload providers */
  219. static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
  220. static int __init dma_channel_table_init(void)
  221. {
  222. enum dma_transaction_type cap;
  223. int err = 0;
  224. bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
  225. /* 'interrupt', 'private', and 'slave' are channel capabilities,
  226. * but are not associated with an operation so they do not need
  227. * an entry in the channel_table
  228. */
  229. clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
  230. clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
  231. clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
  232. for_each_dma_cap_mask(cap, dma_cap_mask_all) {
  233. channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
  234. if (!channel_table[cap]) {
  235. err = -ENOMEM;
  236. break;
  237. }
  238. }
  239. if (err) {
  240. pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
  241. for_each_dma_cap_mask(cap, dma_cap_mask_all)
  242. free_percpu(channel_table[cap]);
  243. }
  244. return err;
  245. }
  246. arch_initcall(dma_channel_table_init);
  247. /**
  248. * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
  249. * @chan: DMA channel to test
  250. * @cpu: CPU index which the channel should be close to
  251. *
  252. * Returns true if the channel is in the same NUMA-node as the CPU.
  253. */
  254. static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
  255. {
  256. int node = dev_to_node(chan->device->dev);
  257. return node == NUMA_NO_NODE ||
  258. cpumask_test_cpu(cpu, cpumask_of_node(node));
  259. }
  260. /**
  261. * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
  262. * @cap: capability to match
  263. * @cpu: CPU index which the channel should be close to
  264. *
  265. * If some channels are close to the given CPU, the one with the lowest
  266. * reference count is returned. Otherwise, CPU is ignored and only the
  267. * reference count is taken into account.
  268. *
  269. * Must be called under dma_list_mutex.
  270. */
  271. static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
  272. {
  273. struct dma_device *device;
  274. struct dma_chan *chan;
  275. struct dma_chan *min = NULL;
  276. struct dma_chan *localmin = NULL;
  277. list_for_each_entry(device, &dma_device_list, global_node) {
  278. if (!dma_has_cap(cap, device->cap_mask) ||
  279. dma_has_cap(DMA_PRIVATE, device->cap_mask))
  280. continue;
  281. list_for_each_entry(chan, &device->channels, device_node) {
  282. if (!chan->client_count)
  283. continue;
  284. if (!min || chan->table_count < min->table_count)
  285. min = chan;
  286. if (dma_chan_is_local(chan, cpu))
  287. if (!localmin ||
  288. chan->table_count < localmin->table_count)
  289. localmin = chan;
  290. }
  291. }
  292. chan = localmin ? localmin : min;
  293. if (chan)
  294. chan->table_count++;
  295. return chan;
  296. }
  297. /**
  298. * dma_channel_rebalance - redistribute the available channels
  299. *
  300. * Optimize for CPU isolation (each CPU gets a dedicated channel for an
  301. * operation type) in the SMP case, and operation isolation (avoid
  302. * multi-tasking channels) in the non-SMP case.
  303. *
  304. * Must be called under dma_list_mutex.
  305. */
  306. static void dma_channel_rebalance(void)
  307. {
  308. struct dma_chan *chan;
  309. struct dma_device *device;
  310. int cpu;
  311. int cap;
  312. /* undo the last distribution */
  313. for_each_dma_cap_mask(cap, dma_cap_mask_all)
  314. for_each_possible_cpu(cpu)
  315. per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
  316. list_for_each_entry(device, &dma_device_list, global_node) {
  317. if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
  318. continue;
  319. list_for_each_entry(chan, &device->channels, device_node)
  320. chan->table_count = 0;
  321. }
  322. /* don't populate the channel_table if no clients are available */
  323. if (!dmaengine_ref_count)
  324. return;
  325. /* redistribute available channels */
  326. for_each_dma_cap_mask(cap, dma_cap_mask_all)
  327. for_each_online_cpu(cpu) {
  328. chan = min_chan(cap, cpu);
  329. per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
  330. }
  331. }
  332. static int dma_device_satisfies_mask(struct dma_device *device,
  333. const dma_cap_mask_t *want)
  334. {
  335. dma_cap_mask_t has;
  336. bitmap_and(has.bits, want->bits, device->cap_mask.bits,
  337. DMA_TX_TYPE_END);
  338. return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
  339. }
  340. static struct module *dma_chan_to_owner(struct dma_chan *chan)
  341. {
  342. return chan->device->owner;
  343. }
  344. /**
  345. * balance_ref_count - catch up the channel reference count
  346. * @chan: channel to balance ->client_count versus dmaengine_ref_count
  347. *
  348. * Must be called under dma_list_mutex.
  349. */
  350. static void balance_ref_count(struct dma_chan *chan)
  351. {
  352. struct module *owner = dma_chan_to_owner(chan);
  353. while (chan->client_count < dmaengine_ref_count) {
  354. __module_get(owner);
  355. chan->client_count++;
  356. }
  357. }
  358. static void dma_device_release(struct kref *ref)
  359. {
  360. struct dma_device *device = container_of(ref, struct dma_device, ref);
  361. list_del_rcu(&device->global_node);
  362. dma_channel_rebalance();
  363. if (device->device_release)
  364. device->device_release(device);
  365. }
  366. static void dma_device_put(struct dma_device *device)
  367. {
  368. lockdep_assert_held(&dma_list_mutex);
  369. kref_put(&device->ref, dma_device_release);
  370. }
  371. /**
  372. * dma_chan_get - try to grab a DMA channel's parent driver module
  373. * @chan: channel to grab
  374. *
  375. * Must be called under dma_list_mutex.
  376. */
  377. static int dma_chan_get(struct dma_chan *chan)
  378. {
  379. struct module *owner = dma_chan_to_owner(chan);
  380. int ret;
  381. /* The channel is already in use, update client count */
  382. if (chan->client_count) {
  383. __module_get(owner);
  384. goto out;
  385. }
  386. if (!try_module_get(owner))
  387. return -ENODEV;
  388. ret = kref_get_unless_zero(&chan->device->ref);
  389. if (!ret) {
  390. ret = -ENODEV;
  391. goto module_put_out;
  392. }
  393. /* allocate upon first client reference */
  394. if (chan->device->device_alloc_chan_resources) {
  395. ret = chan->device->device_alloc_chan_resources(chan);
  396. if (ret < 0)
  397. goto err_out;
  398. }
  399. if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
  400. balance_ref_count(chan);
  401. out:
  402. chan->client_count++;
  403. return 0;
  404. err_out:
  405. dma_device_put(chan->device);
  406. module_put_out:
  407. module_put(owner);
  408. return ret;
  409. }
  410. /**
  411. * dma_chan_put - drop a reference to a DMA channel's parent driver module
  412. * @chan: channel to release
  413. *
  414. * Must be called under dma_list_mutex.
  415. */
  416. static void dma_chan_put(struct dma_chan *chan)
  417. {
  418. /* This channel is not in use, bail out */
  419. if (!chan->client_count)
  420. return;
  421. chan->client_count--;
  422. /* This channel is not in use anymore, free it */
  423. if (!chan->client_count && chan->device->device_free_chan_resources) {
  424. /* Make sure all operations have completed */
  425. dmaengine_synchronize(chan);
  426. chan->device->device_free_chan_resources(chan);
  427. }
  428. /* If the channel is used via a DMA request router, free the mapping */
  429. if (chan->router && chan->router->route_free) {
  430. chan->router->route_free(chan->router->dev, chan->route_data);
  431. chan->router = NULL;
  432. chan->route_data = NULL;
  433. }
  434. dma_device_put(chan->device);
  435. module_put(dma_chan_to_owner(chan));
  436. }
  437. enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
  438. {
  439. enum dma_status status;
  440. unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
  441. dma_async_issue_pending(chan);
  442. do {
  443. status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
  444. if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
  445. dev_err(chan->device->dev, "%s: timeout!\n", __func__);
  446. return DMA_ERROR;
  447. }
  448. if (status != DMA_IN_PROGRESS)
  449. break;
  450. cpu_relax();
  451. } while (1);
  452. return status;
  453. }
  454. EXPORT_SYMBOL(dma_sync_wait);
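/*
 * Example (illustrative sketch): a memcpy offload that polls for completion
 * with dma_sync_wait(). Here chan is assumed to be a DMA_MEMCPY-capable
 * channel and dst/src are assumed to be DMA addresses already mapped by the
 * caller.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(tx);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		return -EIO;
 */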
  455. /**
  456. * dma_find_channel - find a channel to carry out the operation
  457. * @tx_type: transaction type
  458. */
  459. struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
  460. {
  461. return this_cpu_read(channel_table[tx_type]->chan);
  462. }
  463. EXPORT_SYMBOL(dma_find_channel);
  464. /**
  465. * dma_issue_pending_all - flush all pending operations across all channels
  466. */
  467. void dma_issue_pending_all(void)
  468. {
  469. struct dma_device *device;
  470. struct dma_chan *chan;
  471. rcu_read_lock();
  472. list_for_each_entry_rcu(device, &dma_device_list, global_node) {
  473. if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
  474. continue;
  475. list_for_each_entry(chan, &device->channels, device_node)
  476. if (chan->client_count)
  477. device->device_issue_pending(chan);
  478. }
  479. rcu_read_unlock();
  480. }
  481. EXPORT_SYMBOL(dma_issue_pending_all);
  482. int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
  483. {
  484. struct dma_device *device;
  485. if (!chan || !caps)
  486. return -EINVAL;
  487. device = chan->device;
  488. /* check if the channel supports slave transactions */
  489. if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
  490. test_bit(DMA_CYCLIC, device->cap_mask.bits)))
  491. return -ENXIO;
  492. /*
  493. * Check whether it reports the generic slave capabilities;
  494. * if not, it does not support any kind of slave capability
  495. * reporting.
  496. */
  497. if (!device->directions)
  498. return -ENXIO;
  499. caps->src_addr_widths = device->src_addr_widths;
  500. caps->dst_addr_widths = device->dst_addr_widths;
  501. caps->directions = device->directions;
  502. caps->min_burst = device->min_burst;
  503. caps->max_burst = device->max_burst;
  504. caps->max_sg_burst = device->max_sg_burst;
  505. caps->residue_granularity = device->residue_granularity;
  506. caps->descriptor_reuse = device->descriptor_reuse;
  507. caps->cmd_pause = !!device->device_pause;
  508. caps->cmd_resume = !!device->device_resume;
  509. caps->cmd_terminate = !!device->device_terminate_all;
  510. /*
  511. * DMA engine device might be configured with non-uniformly
  512. * distributed slave capabilities per device channels. In this
  513. * case the corresponding driver may provide the device_caps
  514. * callback to override the generic capabilities with
  515. * channel-specific ones.
  516. */
  517. if (device->device_caps)
  518. device->device_caps(chan, caps);
  519. return 0;
  520. }
  521. EXPORT_SYMBOL_GPL(dma_get_slave_caps);
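/*
 * Example (illustrative sketch): a client consulting the reported slave
 * capabilities before relying on pause/resume or residue reporting. The
 * reactions shown are placeholders for client policy.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (dma_get_slave_caps(chan, &caps))
 *		return -EINVAL;
 *	if (!caps.cmd_pause || !caps.cmd_resume)
 *		dev_info(dev, "channel cannot be paused/resumed\n");
 *	if (caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
 *		dev_info(dev, "residue reported per descriptor only\n");
 */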
  522. static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
  523. struct dma_device *dev,
  524. dma_filter_fn fn, void *fn_param)
  525. {
  526. struct dma_chan *chan;
  527. if (mask && !dma_device_satisfies_mask(dev, mask)) {
  528. dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
  529. return NULL;
  530. }
  531. /* devices with multiple channels need special handling as we need to
  532. * ensure that all channels are either private or public.
  533. */
  534. if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
  535. list_for_each_entry(chan, &dev->channels, device_node) {
  536. /* some channels are already publicly allocated */
  537. if (chan->client_count)
  538. return NULL;
  539. }
  540. list_for_each_entry(chan, &dev->channels, device_node) {
  541. if (chan->client_count) {
  542. dev_dbg(dev->dev, "%s: %s busy\n",
  543. __func__, dma_chan_name(chan));
  544. continue;
  545. }
  546. if (fn && !fn(chan, fn_param)) {
  547. dev_dbg(dev->dev, "%s: %s filter said false\n",
  548. __func__, dma_chan_name(chan));
  549. continue;
  550. }
  551. return chan;
  552. }
  553. return NULL;
  554. }
  555. static struct dma_chan *find_candidate(struct dma_device *device,
  556. const dma_cap_mask_t *mask,
  557. dma_filter_fn fn, void *fn_param)
  558. {
  559. struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
  560. int err;
  561. if (chan) {
  562. /* Found a suitable channel, try to grab, prep, and return it.
  563. * We first set DMA_PRIVATE to disable balance_ref_count as this
  564. * channel will not be published in the general-purpose
  565. * allocator
  566. */
  567. dma_cap_set(DMA_PRIVATE, device->cap_mask);
  568. device->privatecnt++;
  569. err = dma_chan_get(chan);
  570. if (err) {
  571. if (err == -ENODEV) {
  572. dev_dbg(device->dev, "%s: %s module removed\n",
  573. __func__, dma_chan_name(chan));
  574. list_del_rcu(&device->global_node);
  575. } else
  576. dev_dbg(device->dev,
  577. "%s: failed to get %s: (%d)\n",
  578. __func__, dma_chan_name(chan), err);
  579. if (--device->privatecnt == 0)
  580. dma_cap_clear(DMA_PRIVATE, device->cap_mask);
  581. chan = ERR_PTR(err);
  582. }
  583. }
  584. return chan ? chan : ERR_PTR(-EPROBE_DEFER);
  585. }
  586. /**
  587. * dma_get_slave_channel - try to get specific channel exclusively
  588. * @chan: target channel
  589. */
  590. struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
  591. {
  592. int err = -EBUSY;
  593. /* lock against __dma_request_channel */
  594. mutex_lock(&dma_list_mutex);
  595. if (chan->client_count == 0) {
  596. struct dma_device *device = chan->device;
  597. dma_cap_set(DMA_PRIVATE, device->cap_mask);
  598. device->privatecnt++;
  599. err = dma_chan_get(chan);
  600. if (err) {
  601. dev_dbg(chan->device->dev,
  602. "%s: failed to get %s: (%d)\n",
  603. __func__, dma_chan_name(chan), err);
  604. chan = NULL;
  605. if (--device->privatecnt == 0)
  606. dma_cap_clear(DMA_PRIVATE, device->cap_mask);
  607. }
  608. } else
  609. chan = NULL;
  610. mutex_unlock(&dma_list_mutex);
  611. return chan;
  612. }
  613. EXPORT_SYMBOL_GPL(dma_get_slave_channel);
  614. struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
  615. {
  616. dma_cap_mask_t mask;
  617. struct dma_chan *chan;
  618. dma_cap_zero(mask);
  619. dma_cap_set(DMA_SLAVE, mask);
  620. /* lock against __dma_request_channel */
  621. mutex_lock(&dma_list_mutex);
  622. chan = find_candidate(device, &mask, NULL, NULL);
  623. mutex_unlock(&dma_list_mutex);
  624. return IS_ERR(chan) ? NULL : chan;
  625. }
  626. EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
  627. /**
  628. * __dma_request_channel - try to allocate an exclusive channel
  629. * @mask: capabilities that the channel must satisfy
  630. * @fn: optional callback used to filter the available channels
  631. * @fn_param: opaque parameter to pass to dma_filter_fn()
  632. * @np: device node to look for DMA channels
  633. *
  634. * Returns pointer to appropriate DMA channel on success or NULL.
  635. */
  636. struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
  637. dma_filter_fn fn, void *fn_param,
  638. struct device_node *np)
  639. {
  640. struct dma_device *device, *_d;
  641. struct dma_chan *chan = NULL;
  642. /* Find a channel */
  643. mutex_lock(&dma_list_mutex);
  644. list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
  645. /* Finds a DMA controller with matching device node */
  646. if (np && device->dev->of_node && np != device->dev->of_node)
  647. continue;
  648. chan = find_candidate(device, mask, fn, fn_param);
  649. if (!IS_ERR(chan))
  650. break;
  651. chan = NULL;
  652. }
  653. mutex_unlock(&dma_list_mutex);
  654. pr_debug("%s: %s (%s)\n",
  655. __func__,
  656. chan ? "success" : "fail",
  657. chan ? dma_chan_name(chan) : NULL);
  658. return chan;
  659. }
  660. EXPORT_SYMBOL_GPL(__dma_request_channel);
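/*
 * Example (illustrative sketch): __dma_request_channel() is usually reached
 * through the dma_request_channel(mask, fn, fn_param) convenience macro. The
 * filter below is made up; real filters typically match driver-specific data
 * such as a request line number.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->chan_id == *(int *)param;	// illustrative test only
 *	}
 *
 *	dma_cap_mask_t mask;
 *	int want = 0;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, &want);
 *	if (!chan)
 *		return -ENODEV;
 */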
  661. static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
  662. const char *name,
  663. struct device *dev)
  664. {
  665. int i;
  666. if (!device->filter.mapcnt)
  667. return NULL;
  668. for (i = 0; i < device->filter.mapcnt; i++) {
  669. const struct dma_slave_map *map = &device->filter.map[i];
  670. if (!strcmp(map->devname, dev_name(dev)) &&
  671. !strcmp(map->slave, name))
  672. return map;
  673. }
  674. return NULL;
  675. }
  676. /**
  677. * dma_request_chan - try to allocate an exclusive slave channel
  678. * @dev: pointer to client device structure
  679. * @name: slave channel name
  680. *
  681. * Returns pointer to appropriate DMA channel on success or an error pointer.
  682. */
  683. struct dma_chan *dma_request_chan(struct device *dev, const char *name)
  684. {
  685. struct dma_device *d, *_d;
  686. struct dma_chan *chan = NULL;
  687. /* If device-tree is present get slave info from here */
  688. if (dev->of_node)
  689. chan = of_dma_request_slave_channel(dev->of_node, name);
  690. /* If device was enumerated by ACPI get slave info from here */
  691. if (has_acpi_companion(dev) && !chan)
  692. chan = acpi_dma_request_slave_chan_by_name(dev, name);
  693. if (PTR_ERR(chan) == -EPROBE_DEFER)
  694. return chan;
  695. if (!IS_ERR_OR_NULL(chan))
  696. goto found;
  697. /* Try to find the channel via the DMA filter map(s) */
  698. mutex_lock(&dma_list_mutex);
  699. list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
  700. dma_cap_mask_t mask;
  701. const struct dma_slave_map *map = dma_filter_match(d, name, dev);
  702. if (!map)
  703. continue;
  704. dma_cap_zero(mask);
  705. dma_cap_set(DMA_SLAVE, mask);
  706. chan = find_candidate(d, &mask, d->filter.fn, map->param);
  707. if (!IS_ERR(chan))
  708. break;
  709. }
  710. mutex_unlock(&dma_list_mutex);
  711. if (IS_ERR(chan))
  712. return chan;
  713. if (!chan)
  714. return ERR_PTR(-EPROBE_DEFER);
  715. found:
  716. #ifdef CONFIG_DEBUG_FS
  717. chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
  718. name);
  719. #endif
  720. chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
  721. if (!chan->name)
  722. return chan;
  723. chan->slave = dev;
  724. if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
  725. DMA_SLAVE_NAME))
  726. dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
  727. if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
  728. dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
  729. return chan;
  730. }
  731. EXPORT_SYMBOL_GPL(dma_request_chan);
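/*
 * Example (illustrative sketch): a minimal slave client. The channel name
 * "tx", the fifo_phys_addr/buf_dma/len values and the burst settings are
 * placeholders; a real client takes them from its own bindings and hardware
 * description.
 *
 *	struct dma_chan *chan;
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys_addr,		// device FIFO address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 4,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	chan = dma_request_chan(dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);			// may be -EPROBE_DEFER
 *	if (dmaengine_slave_config(chan, &cfg)) {
 *		dma_release_channel(chan);
 *		return -EINVAL;
 *	}
 *	tx = dmaengine_prep_slave_single(chan, buf_dma, len, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	// submit with dmaengine_submit(), start with dma_async_issue_pending(),
 *	// and call dma_release_channel(chan) when the client is done
 */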
  732. /**
  733. * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
  734. * @mask: capabilities that the channel must satisfy
  735. *
  736. * Returns pointer to appropriate DMA channel on success or an error pointer.
  737. */
  738. struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
  739. {
  740. struct dma_chan *chan;
  741. if (!mask)
  742. return ERR_PTR(-ENODEV);
  743. chan = __dma_request_channel(mask, NULL, NULL, NULL);
  744. if (!chan) {
  745. mutex_lock(&dma_list_mutex);
  746. if (list_empty(&dma_device_list))
  747. chan = ERR_PTR(-EPROBE_DEFER);
  748. else
  749. chan = ERR_PTR(-ENODEV);
  750. mutex_unlock(&dma_list_mutex);
  751. }
  752. return chan;
  753. }
  754. EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
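/*
 * Example (illustrative sketch): a mask-only request, e.g. for any
 * memcpy-capable channel when the client has no firmware/board binding.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// -EPROBE_DEFER if no DMA device yet
 */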
  755. void dma_release_channel(struct dma_chan *chan)
  756. {
  757. mutex_lock(&dma_list_mutex);
  758. WARN_ONCE(chan->client_count != 1,
  759. "chan reference count %d != 1\n", chan->client_count);
  760. dma_chan_put(chan);
  761. /* drop PRIVATE cap enabled by __dma_request_channel() */
  762. if (--chan->device->privatecnt == 0)
  763. dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
  764. if (chan->slave) {
  765. sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
  766. sysfs_remove_link(&chan->slave->kobj, chan->name);
  767. kfree(chan->name);
  768. chan->name = NULL;
  769. chan->slave = NULL;
  770. }
  771. #ifdef CONFIG_DEBUG_FS
  772. kfree(chan->dbg_client_name);
  773. chan->dbg_client_name = NULL;
  774. #endif
  775. mutex_unlock(&dma_list_mutex);
  776. }
  777. EXPORT_SYMBOL_GPL(dma_release_channel);
  778. /**
  779. * dmaengine_get - register interest in dma_channels
  780. */
  781. void dmaengine_get(void)
  782. {
  783. struct dma_device *device, *_d;
  784. struct dma_chan *chan;
  785. int err;
  786. mutex_lock(&dma_list_mutex);
  787. dmaengine_ref_count++;
  788. /* try to grab channels */
  789. list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
  790. if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
  791. continue;
  792. list_for_each_entry(chan, &device->channels, device_node) {
  793. err = dma_chan_get(chan);
  794. if (err == -ENODEV) {
  795. /* module removed before we could use it */
  796. list_del_rcu(&device->global_node);
  797. break;
  798. } else if (err)
  799. dev_dbg(chan->device->dev,
  800. "%s: failed to get %s: (%d)\n",
  801. __func__, dma_chan_name(chan), err);
  802. }
  803. }
  804. /* if this is the first reference and there were channels
  805. * waiting we need to rebalance to get those channels
  806. * incorporated into the channel table
  807. */
  808. if (dmaengine_ref_count == 1)
  809. dma_channel_rebalance();
  810. mutex_unlock(&dma_list_mutex);
  811. }
  812. EXPORT_SYMBOL(dmaengine_get);
  813. /**
  814. * dmaengine_put - let DMA drivers be removed when ref_count == 0
  815. */
  816. void dmaengine_put(void)
  817. {
  818. struct dma_device *device, *_d;
  819. struct dma_chan *chan;
  820. mutex_lock(&dma_list_mutex);
  821. dmaengine_ref_count--;
  822. BUG_ON(dmaengine_ref_count < 0);
  823. /* drop channel references */
  824. list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
  825. if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
  826. continue;
  827. list_for_each_entry(chan, &device->channels, device_node)
  828. dma_chan_put(chan);
  829. }
  830. mutex_unlock(&dma_list_mutex);
  831. }
  832. EXPORT_SYMBOL(dmaengine_put);
  833. static bool device_has_all_tx_types(struct dma_device *device)
  834. {
  835. /* A device that satisfies this test has channels that will never cause
  836. * an async_tx channel switch event as all possible operation types can
  837. * be handled.
  838. */
  839. #ifdef CONFIG_ASYNC_TX_DMA
  840. if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
  841. return false;
  842. #endif
  843. #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
  844. if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
  845. return false;
  846. #endif
  847. #if IS_ENABLED(CONFIG_ASYNC_XOR)
  848. if (!dma_has_cap(DMA_XOR, device->cap_mask))
  849. return false;
  850. #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
  851. if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
  852. return false;
  853. #endif
  854. #endif
  855. #if IS_ENABLED(CONFIG_ASYNC_PQ)
  856. if (!dma_has_cap(DMA_PQ, device->cap_mask))
  857. return false;
  858. #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
  859. if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
  860. return false;
  861. #endif
  862. #endif
  863. return true;
  864. }
  865. static int get_dma_id(struct dma_device *device)
  866. {
  867. int rc = ida_alloc(&dma_ida, GFP_KERNEL);
  868. if (rc < 0)
  869. return rc;
  870. device->dev_id = rc;
  871. return 0;
  872. }
  873. static int __dma_async_device_channel_register(struct dma_device *device,
  874. struct dma_chan *chan)
  875. {
  876. int rc;
  877. chan->local = alloc_percpu(typeof(*chan->local));
  878. if (!chan->local)
  879. return -ENOMEM;
  880. chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
  881. if (!chan->dev) {
  882. rc = -ENOMEM;
  883. goto err_free_local;
  884. }
  885. /*
  886. * When the chan_id is a negative value, we are dynamically adding
  887. * the channel. Otherwise we are enumerating statically.
  888. */
  889. mutex_lock(&device->chan_mutex);
  890. chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
  891. mutex_unlock(&device->chan_mutex);
  892. if (chan->chan_id < 0) {
  893. pr_err("%s: unable to alloc ida for chan: %d\n",
  894. __func__, chan->chan_id);
  895. rc = chan->chan_id;
  896. goto err_free_dev;
  897. }
  898. chan->dev->device.class = &dma_devclass;
  899. chan->dev->device.parent = device->dev;
  900. chan->dev->chan = chan;
  901. chan->dev->dev_id = device->dev_id;
  902. dev_set_name(&chan->dev->device, "dma%dchan%d",
  903. device->dev_id, chan->chan_id);
  904. rc = device_register(&chan->dev->device);
  905. if (rc)
  906. goto err_out_ida;
  907. chan->client_count = 0;
  908. device->chancnt++;
  909. return 0;
  910. err_out_ida:
  911. mutex_lock(&device->chan_mutex);
  912. ida_free(&device->chan_ida, chan->chan_id);
  913. mutex_unlock(&device->chan_mutex);
  914. err_free_dev:
  915. kfree(chan->dev);
  916. err_free_local:
  917. free_percpu(chan->local);
  918. chan->local = NULL;
  919. return rc;
  920. }
  921. int dma_async_device_channel_register(struct dma_device *device,
  922. struct dma_chan *chan)
  923. {
  924. int rc;
  925. rc = __dma_async_device_channel_register(device, chan);
  926. if (rc < 0)
  927. return rc;
  928. dma_channel_rebalance();
  929. return 0;
  930. }
  931. EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
  932. static void __dma_async_device_channel_unregister(struct dma_device *device,
  933. struct dma_chan *chan)
  934. {
  935. WARN_ONCE(!device->device_release && chan->client_count,
  936. "%s called while %d clients hold a reference\n",
  937. __func__, chan->client_count);
  938. mutex_lock(&dma_list_mutex);
  939. device->chancnt--;
  940. chan->dev->chan = NULL;
  941. mutex_unlock(&dma_list_mutex);
  942. mutex_lock(&device->chan_mutex);
  943. ida_free(&device->chan_ida, chan->chan_id);
  944. mutex_unlock(&device->chan_mutex);
  945. device_unregister(&chan->dev->device);
  946. free_percpu(chan->local);
  947. }
  948. void dma_async_device_channel_unregister(struct dma_device *device,
  949. struct dma_chan *chan)
  950. {
  951. __dma_async_device_channel_unregister(device, chan);
  952. dma_channel_rebalance();
  953. }
  954. EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
  955. /**
  956. * dma_async_device_register - registers a DMA device
  957. * @device: pointer to &struct dma_device
  958. *
  959. * After calling this routine the structure should not be freed except in the
  960. * device_release() callback which will be called after
  961. * dma_async_device_unregister() is called and no further references are taken.
  962. */
  963. int dma_async_device_register(struct dma_device *device)
  964. {
  965. int rc;
  966. struct dma_chan* chan;
  967. if (!device)
  968. return -ENODEV;
  969. /* validate device routines */
  970. if (!device->dev) {
  971. pr_err("DMA device must have dev\n");
  972. return -EIO;
  973. }
  974. device->owner = device->dev->driver->owner;
  975. if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
  976. dev_err(device->dev,
  977. "Device claims capability %s, but op is not defined\n",
  978. "DMA_MEMCPY");
  979. return -EIO;
  980. }
  981. if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
  982. dev_err(device->dev,
  983. "Device claims capability %s, but op is not defined\n",
  984. "DMA_XOR");
  985. return -EIO;
  986. }
  987. if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
  988. dev_err(device->dev,
  989. "Device claims capability %s, but op is not defined\n",
  990. "DMA_XOR_VAL");
  991. return -EIO;
  992. }
  993. if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
  994. dev_err(device->dev,
  995. "Device claims capability %s, but op is not defined\n",
  996. "DMA_PQ");
  997. return -EIO;
  998. }
  999. if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
  1000. dev_err(device->dev,
  1001. "Device claims capability %s, but op is not defined\n",
  1002. "DMA_PQ_VAL");
  1003. return -EIO;
  1004. }
  1005. if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
  1006. dev_err(device->dev,
  1007. "Device claims capability %s, but op is not defined\n",
  1008. "DMA_MEMSET");
  1009. return -EIO;
  1010. }
  1011. if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
  1012. dev_err(device->dev,
  1013. "Device claims capability %s, but op is not defined\n",
  1014. "DMA_INTERRUPT");
  1015. return -EIO;
  1016. }
  1017. if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
  1018. dev_err(device->dev,
  1019. "Device claims capability %s, but op is not defined\n",
  1020. "DMA_CYCLIC");
  1021. return -EIO;
  1022. }
  1023. if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
  1024. dev_err(device->dev,
  1025. "Device claims capability %s, but op is not defined\n",
  1026. "DMA_INTERLEAVE");
  1027. return -EIO;
  1028. }
  1029. if (!device->device_tx_status) {
  1030. dev_err(device->dev, "Device tx_status is not defined\n");
  1031. return -EIO;
  1032. }
  1033. if (!device->device_issue_pending) {
  1034. dev_err(device->dev, "Device issue_pending is not defined\n");
  1035. return -EIO;
  1036. }
  1037. if (!device->device_release)
  1038. dev_dbg(device->dev,
  1039. "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
  1040. kref_init(&device->ref);
  1041. /* note: this only matters in the
  1042. * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
  1043. */
  1044. if (device_has_all_tx_types(device))
  1045. dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
  1046. rc = get_dma_id(device);
  1047. if (rc != 0)
  1048. return rc;
  1049. mutex_init(&device->chan_mutex);
  1050. ida_init(&device->chan_ida);
  1051. /* represent channels in sysfs. Probably want devs too */
  1052. list_for_each_entry(chan, &device->channels, device_node) {
  1053. rc = __dma_async_device_channel_register(device, chan);
  1054. if (rc < 0)
  1055. goto err_out;
  1056. }
  1057. mutex_lock(&dma_list_mutex);
  1058. /* take references on public channels */
  1059. if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
  1060. list_for_each_entry(chan, &device->channels, device_node) {
  1061. /* if clients are already waiting for channels we need
  1062. * to take references on their behalf
  1063. */
  1064. if (dma_chan_get(chan) == -ENODEV) {
  1065. /* note we can only get here for the first
  1066. * channel as the remaining channels are
  1067. * guaranteed to get a reference
  1068. */
  1069. rc = -ENODEV;
  1070. mutex_unlock(&dma_list_mutex);
  1071. goto err_out;
  1072. }
  1073. }
  1074. list_add_tail_rcu(&device->global_node, &dma_device_list);
  1075. if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
  1076. device->privatecnt++; /* Always private */
  1077. dma_channel_rebalance();
  1078. mutex_unlock(&dma_list_mutex);
  1079. dmaengine_debug_register(device);
  1080. return 0;
  1081. err_out:
  1082. /* if we never registered a channel just release the idr */
  1083. if (!device->chancnt) {
  1084. ida_free(&dma_ida, device->dev_id);
  1085. return rc;
  1086. }
  1087. list_for_each_entry(chan, &device->channels, device_node) {
  1088. if (chan->local == NULL)
  1089. continue;
  1090. mutex_lock(&dma_list_mutex);
  1091. chan->dev->chan = NULL;
  1092. mutex_unlock(&dma_list_mutex);
  1093. device_unregister(&chan->dev->device);
  1094. free_percpu(chan->local);
  1095. }
  1096. return rc;
  1097. }
  1098. EXPORT_SYMBOL(dma_async_device_register);
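/*
 * Example (illustrative sketch): an abridged provider-side registration for a
 * hypothetical "foo" driver; the foo_* callbacks and structures are
 * assumptions. The capabilities set in cap_mask must match the prep callbacks
 * provided, device_tx_status and device_issue_pending are mandatory, and every
 * channel must already be on the device's channels list before registering.
 *
 *	struct dma_device *dd = &foo->dma_dev;
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->dev = &pdev->dev;
 *	dd->device_prep_dma_memcpy = foo_prep_memcpy;
 *	dd->device_tx_status = foo_tx_status;
 *	dd->device_issue_pending = foo_issue_pending;
 *	INIT_LIST_HEAD(&dd->channels);
 *	foo->chan.device = dd;
 *	list_add_tail(&foo->chan.device_node, &dd->channels);
 *	return dma_async_device_register(dd);
 */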
  1099. /**
  1100. * dma_async_device_unregister - unregister a DMA device
  1101. * @device: pointer to &struct dma_device
  1102. *
  1103. * This routine is called by DMA driver exit routines; dmaengine holds module
  1104. * references to prevent it from being called while channels are in use.
  1105. */
  1106. void dma_async_device_unregister(struct dma_device *device)
  1107. {
  1108. struct dma_chan *chan, *n;
  1109. dmaengine_debug_unregister(device);
  1110. list_for_each_entry_safe(chan, n, &device->channels, device_node)
  1111. __dma_async_device_channel_unregister(device, chan);
  1112. mutex_lock(&dma_list_mutex);
  1113. /*
  1114. * setting DMA_PRIVATE ensures the device being torn down will not
  1115. * be used in the channel_table
  1116. */
  1117. dma_cap_set(DMA_PRIVATE, device->cap_mask);
  1118. dma_channel_rebalance();
  1119. ida_free(&dma_ida, device->dev_id);
  1120. dma_device_put(device);
  1121. mutex_unlock(&dma_list_mutex);
  1122. }
  1123. EXPORT_SYMBOL(dma_async_device_unregister);
  1124. static void dmam_device_release(struct device *dev, void *res)
  1125. {
  1126. struct dma_device *device;
  1127. device = *(struct dma_device **)res;
  1128. dma_async_device_unregister(device);
  1129. }
  1130. /**
  1131. * dmaenginem_async_device_register - registers a DMA device (managed)
  1132. * @device: pointer to &struct dma_device
  1133. *
  1134. * The operation is managed and will be undone on driver detach.
  1135. */
  1136. int dmaenginem_async_device_register(struct dma_device *device)
  1137. {
  1138. void *p;
  1139. int ret;
  1140. p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
  1141. if (!p)
  1142. return -ENOMEM;
  1143. ret = dma_async_device_register(device);
  1144. if (!ret) {
  1145. *(struct dma_device **)p = device;
  1146. devres_add(device->dev, p);
  1147. } else {
  1148. devres_free(p);
  1149. }
  1150. return ret;
  1151. }
  1152. EXPORT_SYMBOL(dmaenginem_async_device_register);
  1153. struct dmaengine_unmap_pool {
  1154. struct kmem_cache *cache;
  1155. const char *name;
  1156. mempool_t *pool;
  1157. size_t size;
  1158. };
  1159. #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
  1160. static struct dmaengine_unmap_pool unmap_pool[] = {
  1161. __UNMAP_POOL(2),
  1162. #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
  1163. __UNMAP_POOL(16),
  1164. __UNMAP_POOL(128),
  1165. __UNMAP_POOL(256),
  1166. #endif
  1167. };
  1168. static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
  1169. {
  1170. int order = get_count_order(nr);
  1171. switch (order) {
  1172. case 0 ... 1:
  1173. return &unmap_pool[0];
  1174. #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
  1175. case 2 ... 4:
  1176. return &unmap_pool[1];
  1177. case 5 ... 7:
  1178. return &unmap_pool[2];
  1179. case 8:
  1180. return &unmap_pool[3];
  1181. #endif
  1182. default:
  1183. BUG();
  1184. return NULL;
  1185. }
  1186. }
  1187. static void dmaengine_unmap(struct kref *kref)
  1188. {
  1189. struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
  1190. struct device *dev = unmap->dev;
  1191. int cnt, i;
  1192. cnt = unmap->to_cnt;
  1193. for (i = 0; i < cnt; i++)
  1194. dma_unmap_page(dev, unmap->addr[i], unmap->len,
  1195. DMA_TO_DEVICE);
  1196. cnt += unmap->from_cnt;
  1197. for (; i < cnt; i++)
  1198. dma_unmap_page(dev, unmap->addr[i], unmap->len,
  1199. DMA_FROM_DEVICE);
  1200. cnt += unmap->bidi_cnt;
  1201. for (; i < cnt; i++) {
  1202. if (unmap->addr[i] == 0)
  1203. continue;
  1204. dma_unmap_page(dev, unmap->addr[i], unmap->len,
  1205. DMA_BIDIRECTIONAL);
  1206. }
  1207. cnt = unmap->map_cnt;
  1208. mempool_free(unmap, __get_unmap_pool(cnt)->pool);
  1209. }
  1210. void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
  1211. {
  1212. if (unmap)
  1213. kref_put(&unmap->kref, dmaengine_unmap);
  1214. }
  1215. EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
  1216. static void dmaengine_destroy_unmap_pool(void)
  1217. {
  1218. int i;
  1219. for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
  1220. struct dmaengine_unmap_pool *p = &unmap_pool[i];
  1221. mempool_destroy(p->pool);
  1222. p->pool = NULL;
  1223. kmem_cache_destroy(p->cache);
  1224. p->cache = NULL;
  1225. }
  1226. }
  1227. static int __init dmaengine_init_unmap_pool(void)
  1228. {
  1229. int i;
  1230. for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
  1231. struct dmaengine_unmap_pool *p = &unmap_pool[i];
  1232. size_t size;
  1233. size = sizeof(struct dmaengine_unmap_data) +
  1234. sizeof(dma_addr_t) * p->size;
  1235. p->cache = kmem_cache_create(p->name, size, 0,
  1236. SLAB_HWCACHE_ALIGN, NULL);
  1237. if (!p->cache)
  1238. break;
  1239. p->pool = mempool_create_slab_pool(1, p->cache);
  1240. if (!p->pool)
  1241. break;
  1242. }
  1243. if (i == ARRAY_SIZE(unmap_pool))
  1244. return 0;
  1245. dmaengine_destroy_unmap_pool();
  1246. return -ENOMEM;
  1247. }
  1248. struct dmaengine_unmap_data *
  1249. dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
  1250. {
  1251. struct dmaengine_unmap_data *unmap;
  1252. unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
  1253. if (!unmap)
  1254. return NULL;
  1255. memset(unmap, 0, sizeof(*unmap));
  1256. kref_init(&unmap->kref);
  1257. unmap->dev = dev;
  1258. unmap->map_cnt = nr;
  1259. return unmap;
  1260. }
  1261. EXPORT_SYMBOL(dmaengine_get_unmap_data);
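/*
 * Example (illustrative sketch): how offload code pairs unmap data with a
 * descriptor so the mappings are released when the descriptor is freed. The
 * page/offset variables are placeholders.
 *
 *	struct dmaengine_unmap_data *unmap;
 *
 *	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->to_cnt = 1;
 *	unmap->addr[0] = dma_map_page(device->dev, src_pg, src_off, len,
 *				      DMA_TO_DEVICE);
 *	unmap->from_cnt = 1;
 *	unmap->addr[1] = dma_map_page(device->dev, dst_pg, dst_off, len,
 *				      DMA_FROM_DEVICE);
 *	tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
 *					    unmap->addr[0], len, 0);
 *	if (tx)
 *		dma_set_unmap(tx, unmap);	// tx takes its own reference
 *	dmaengine_unmap_put(unmap);		// drop ours
 */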
  1262. void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
  1263. struct dma_chan *chan)
  1264. {
  1265. tx->chan = chan;
  1266. #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
  1267. spin_lock_init(&tx->lock);
  1268. #endif
  1269. }
  1270. EXPORT_SYMBOL(dma_async_tx_descriptor_init);
  1271. static inline int desc_check_and_set_metadata_mode(
  1272. struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
  1273. {
  1274. /* Make sure that the metadata mode is not mixed */
  1275. if (!desc->desc_metadata_mode) {
  1276. if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
  1277. desc->desc_metadata_mode = mode;
  1278. else
  1279. return -ENOTSUPP;
  1280. } else if (desc->desc_metadata_mode != mode) {
  1281. return -EINVAL;
  1282. }
  1283. return 0;
  1284. }
  1285. int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
  1286. void *data, size_t len)
  1287. {
  1288. int ret;
  1289. if (!desc)
  1290. return -EINVAL;
  1291. ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
  1292. if (ret)
  1293. return ret;
  1294. if (!desc->metadata_ops || !desc->metadata_ops->attach)
  1295. return -ENOTSUPP;
  1296. return desc->metadata_ops->attach(desc, data, len);
  1297. }
  1298. EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
  1299. void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
  1300. size_t *payload_len, size_t *max_len)
  1301. {
  1302. int ret;
  1303. if (!desc)
  1304. return ERR_PTR(-EINVAL);
  1305. ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
  1306. if (ret)
  1307. return ERR_PTR(ret);
  1308. if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
  1309. return ERR_PTR(-ENOTSUPP);
  1310. return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
  1311. }
  1312. EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
  1313. int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
  1314. size_t payload_len)
  1315. {
  1316. int ret;
  1317. if (!desc)
  1318. return -EINVAL;
  1319. ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
  1320. if (ret)
  1321. return ret;
  1322. if (!desc->metadata_ops || !desc->metadata_ops->set_len)
  1323. return -ENOTSUPP;
  1324. return desc->metadata_ops->set_len(desc, payload_len);
  1325. }
  1326. EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
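/*
 * Example (illustrative sketch): the two metadata modes guarded by
 * desc_check_and_set_metadata_mode() above. In DESC_METADATA_CLIENT mode the
 * client owns the buffer and attaches it; in DESC_METADATA_ENGINE mode the
 * client writes into the engine's buffer and reports the payload length.
 * md_buf/md_len are placeholders.
 *
 *	// client mode, before dmaengine_submit()
 *	ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
 *
 *	// engine mode, MEM_TO_DEV: fill the engine buffer before submitting
 *	ptr = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
 *	if (!IS_ERR(ptr) && md_len <= max_len) {
 *		memcpy(ptr, md_buf, md_len);
 *		dmaengine_desc_set_metadata_len(desc, md_len);
 *	}
 */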
  1327. /**
  1328. * dma_wait_for_async_tx - spin wait for a transaction to complete
  1329. * @tx: in-flight transaction to wait on
  1330. */
  1331. enum dma_status
  1332. dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
  1333. {
  1334. unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
  1335. if (!tx)
  1336. return DMA_COMPLETE;
  1337. while (tx->cookie == -EBUSY) {
  1338. if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
  1339. dev_err(tx->chan->device->dev,
  1340. "%s timeout waiting for descriptor submission\n",
  1341. __func__);
  1342. return DMA_ERROR;
  1343. }
  1344. cpu_relax();
  1345. }
  1346. return dma_sync_wait(tx->chan, tx->cookie);
  1347. }
  1348. EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
  1349. /**
  1350. * dma_run_dependencies - process dependent operations on the target channel
  1351. * @tx: transaction with dependencies
  1352. *
  1353. * Helper routine for DMA drivers to process (start) dependent operations
  1354. * on their target channel.
  1355. */
  1356. void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
  1357. {
  1358. struct dma_async_tx_descriptor *dep = txd_next(tx);
  1359. struct dma_async_tx_descriptor *dep_next;
  1360. struct dma_chan *chan;
  1361. if (!dep)
  1362. return;
  1363. /* we'll submit tx->next now, so clear the link */
  1364. txd_clear_next(tx);
  1365. chan = dep->chan;
  1366. /* keep submitting up until a channel switch is detected
  1367. * in that case we will be called again as a result of
  1368. * processing the interrupt from async_tx_channel_switch
  1369. */
  1370. for (; dep; dep = dep_next) {
  1371. txd_lock(dep);
  1372. txd_clear_parent(dep);
  1373. dep_next = txd_next(dep);
  1374. if (dep_next && dep_next->chan == chan)
  1375. txd_clear_next(dep); /* ->next will be submitted */
  1376. else
  1377. dep_next = NULL; /* submit current dep and terminate */
  1378. txd_unlock(dep);
  1379. dep->tx_submit(dep);
  1380. }
  1381. chan->device->device_issue_pending(chan);
  1382. }
  1383. EXPORT_SYMBOL_GPL(dma_run_dependencies);
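/*
 * Example (illustrative sketch): a provider typically calls this from its
 * descriptor-completion path after completing the cookie. The embedded
 * descriptor field name (desc->txd) is an assumption; dma_cookie_complete()
 * and dma_descriptor_unmap() are the existing helpers.
 *
 *	dma_cookie_complete(&desc->txd);
 *	dma_descriptor_unmap(&desc->txd);
 *	dma_run_dependencies(&desc->txd);
 */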
  1384. static int __init dma_bus_init(void)
  1385. {
  1386. int err = dmaengine_init_unmap_pool();
  1387. if (err)
  1388. return err;
  1389. err = class_register(&dma_devclass);
  1390. if (!err)
  1391. dmaengine_debugfs_init();
  1392. return err;
  1393. }
  1394. arch_initcall(dma_bus_init);