iop-adma.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * offload engine driver for the Intel Xscale series of i/o processors
 * Copyright © 2006, Intel Corporation.
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/memory.h>
#include <linux/ioport.h>
#include <linux/raid/pq.h>
#include <linux/slab.h>

#include "iop-adma.h"
#include "dmaengine.h"

#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
	container_of(dev, struct iop_adma_device, common)
#define tx_to_iop_adma_slot(tx) \
	container_of(tx, struct iop_adma_desc_slot, async_tx)

/**
 * iop_adma_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &iop_chan->lock while calling this function
 */
static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
{
	int stride = slot->slots_per_op;

	while (stride--) {
		slot->slots_per_op = 0;
		slot = list_entry(slot->slot_node.next,
				  struct iop_adma_desc_slot,
				  slot_node);
	}
}

static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
				 struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *tx = &desc->async_tx;

	BUG_ON(tx->cookie < 0);
	if (tx->cookie > 0) {
		cookie = tx->cookie;
		tx->cookie = 0;
		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		dmaengine_desc_get_callback_invoke(tx, NULL);
		dma_descriptor_unmap(tx);
		if (desc->group_head)
			desc->group_head = NULL;
	}

	/* run dependent operations */
	dma_run_dependencies(tx);

	return cookie;
}

static int
iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
		    struct iop_adma_chan *iop_chan)
{
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		return 0;

	/* leave the last descriptor in the chain
	 * so we can append to it
	 */
	if (desc->chain_node.next == &iop_chan->chain)
		return 1;

	dev_dbg(iop_chan->device->common.dev,
		"\tfree slot: %d slots_per_op: %d\n",
		desc->idx, desc->slots_per_op);
	list_del(&desc->chain_node);
	iop_adma_free_slots(desc);

	return 0;
}
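
/*
 * __iop_adma_slot_cleanup - reclaim finished descriptors
 * Walks the software chain from the oldest descriptor up to (but not past)
 * the descriptor currently loaded in the hardware channel, writes back any
 * zero-sum results, invokes completion callbacks and returns the slots to
 * the free pool.  Caller must hold iop_chan->lock.
 */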
static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
	dma_cookie_t cookie = 0;
	u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
	int busy = iop_chan_is_busy(iop_chan);
	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
				 chain_node) {
		pr_debug("\tcookie: %d slot: %d busy: %d "
			 "this_desc: %pad next_desc: %#llx ack: %d\n",
			 iter->async_tx.cookie, iter->idx, busy,
			 &iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
			 async_tx_test_ack(&iter->async_tx));
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy, or if it appears that the current descriptor
		 * needs to be re-read (i.e. has been appended to)
		 */
		if (iter->async_tx.phys == current_desc) {
			BUG_ON(seen_current++);
			if (busy || iop_desc_get_next_desc(iter))
				break;
		}

		/* detect the start of a group transaction */
		if (!slot_cnt && !slots_per_op) {
			slot_cnt = iter->slot_cnt;
			slots_per_op = iter->slots_per_op;
			if (slot_cnt <= slots_per_op) {
				slot_cnt = 0;
				slots_per_op = 0;
			}
		}

		if (slot_cnt) {
			pr_debug("\tgroup++\n");
			if (!grp_start)
				grp_start = iter;
			slot_cnt -= slots_per_op;
		}

		/* all the members of a group are complete */
		if (slots_per_op != 0 && slot_cnt == 0) {
			struct iop_adma_desc_slot *grp_iter, *_grp_iter;
			int end_of_chain = 0;

			pr_debug("\tgroup end\n");

			/* collect the total results */
			if (grp_start->xor_check_result) {
				u32 zero_sum_result = 0;

				slot_cnt = grp_start->slot_cnt;
				grp_iter = grp_start;

				list_for_each_entry_from(grp_iter,
					&iop_chan->chain, chain_node) {
					zero_sum_result |=
						iop_desc_get_zero_result(grp_iter);
					pr_debug("\titer%d result: %d\n",
						 grp_iter->idx, zero_sum_result);
					slot_cnt -= slots_per_op;
					if (slot_cnt == 0)
						break;
				}
				pr_debug("\tgrp_start->xor_check_result: %p\n",
					 grp_start->xor_check_result);
				*grp_start->xor_check_result = zero_sum_result;
			}

			/* clean up the group */
			slot_cnt = grp_start->slot_cnt;
			grp_iter = grp_start;
			list_for_each_entry_safe_from(grp_iter, _grp_iter,
				&iop_chan->chain, chain_node) {
				cookie = iop_adma_run_tx_complete_actions(
					grp_iter, iop_chan, cookie);

				slot_cnt -= slots_per_op;
				end_of_chain = iop_adma_clean_slot(grp_iter,
					iop_chan);

				if (slot_cnt == 0 || end_of_chain)
					break;
			}

			/* the group should be complete at this point */
			BUG_ON(slot_cnt);

			slots_per_op = 0;
			grp_start = NULL;
			if (end_of_chain)
				break;
			else
				continue;
		} else if (slots_per_op) /* wait for group completion */
			continue;

		/* write back zero sum results (single descriptor case) */
		if (iter->xor_check_result && iter->async_tx.cookie)
			*iter->xor_check_result =
				iop_desc_get_zero_result(iter);

		cookie = iop_adma_run_tx_complete_actions(
			iter, iop_chan, cookie);

		if (iop_adma_clean_slot(iter, iop_chan))
			break;
	}

	if (cookie > 0) {
		iop_chan->common.completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}

static void
iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	spin_lock_bh(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock_bh(&iop_chan->lock);
}

static void iop_adma_tasklet(struct tasklet_struct *t)
{
	struct iop_adma_chan *iop_chan = from_tasklet(iop_chan, t,
						      irq_tasklet);

	/* lockdep will flag dependency submissions as potentially
	 * recursive locking, this is not the case as a dependency
	 * submission will never recurse a channel's submit routine.
	 * There are checks in async_tx.c to prevent this.
	 */
	spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock(&iop_chan->lock);
}
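
/*
 * iop_adma_alloc_slots - reserve a run of contiguous descriptor slots
 * Scans the channel's slot list (first from the last allocated slot, then
 * once more from the head on retry) for num_slots free, properly aligned
 * slots, links them into a group and returns the group tail.  Falls back
 * to direct reclaim and returns NULL if no run can be found.
 */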
static struct iop_adma_desc_slot *
iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
		     int slots_per_op)
{
	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = iop_chan->last_used;
	else
		iter = list_entry(&iop_chan->all_slots,
				  struct iop_adma_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++) {
			if (iop_desc_is_aligned(iter, slots_per_op))
				alloc_start = iter;
			else {
				slots_found = 0;
				continue;
			}
		}

		if (slots_found == num_slots) {
			struct iop_adma_desc_slot *alloc_tail = NULL;
			struct iop_adma_desc_slot *last_used = NULL;

			iter = alloc_start;
			while (num_slots) {
				int i;

				dev_dbg(iop_chan->device->common.dev,
					"allocated slot: %d "
					"(desc %p phys: %#llx) slots_per_op %d\n",
					iter->idx, iter->hw_desc,
					(u64)iter->async_tx.phys, slots_per_op);

				/* pre-ack all but the last descriptor */
				if (num_slots != slots_per_op)
					async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
							  struct iop_adma_desc_slot,
							  slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			iop_chan->last_used = last_used;
			iop_desc_clear_next_desc(alloc_start);
			iop_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* perform direct reclaim if the allocation fails */
	__iop_adma_slot_cleanup(iop_chan);

	return NULL;
}

static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
{
	dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
		iop_chan->pending);

	if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}
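
/*
 * iop_adma_tx_submit - append a software descriptor group to the channel
 * Assigns a cookie, splices the descriptor group onto the software chain,
 * links the hardware descriptors behind the old chain tail and kicks the
 * engine once the pending count crosses IOP_ADMA_THRESHOLD.
 */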
static dma_cookie_t
iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
	struct iop_adma_desc_slot *grp_start, *old_chain_tail;
	int slot_cnt;
	dma_cookie_t cookie;
	dma_addr_t next_dma;

	grp_start = sw_desc->group_head;
	slot_cnt = grp_start->slot_cnt;

	spin_lock_bh(&iop_chan->lock);
	cookie = dma_cookie_assign(tx);

	old_chain_tail = list_entry(iop_chan->chain.prev,
				    struct iop_adma_desc_slot, chain_node);
	list_splice_init(&sw_desc->tx_list,
			 &old_chain_tail->chain_node);

	/* fix up the hardware chain */
	next_dma = grp_start->async_tx.phys;
	iop_desc_set_next_desc(old_chain_tail, next_dma);
	BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */

	/* check for pre-chained descriptors */
	iop_paranoia(iop_desc_get_next_desc(sw_desc));

	/* increment the pending count by the number of slots.
	 * memcpy operations have a 1:1 (slot:operation) relation;
	 * other operations are heavier and will pop the threshold
	 * more often.
	 */
	iop_chan->pending += slot_cnt;
	iop_adma_check_threshold(iop_chan);
	spin_unlock_bh(&iop_chan->lock);

	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
		__func__, sw_desc->async_tx.cookie, sw_desc->idx);

	return cookie;
}

static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);

/**
 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: allocate descriptor resources for this channel
 *
 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
 * greater than 2x the number of slots needed to satisfy a device->max_xor
 * request.
 */
static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	dma_addr_t dma_desc;
	int idx;
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *slot = NULL;
	int init = iop_chan->slots_allocated ? 0 : 1;
	struct iop_adma_platform_data *plat_data =
		dev_get_platdata(&iop_chan->device->pdev->dev);
	int num_descs_in_pool = plat_data->pool_size / IOP_ADMA_SLOT_SIZE;

	/* Allocate descriptor slots */
	do {
		idx = iop_chan->slots_allocated;
		if (idx == num_descs_in_pool)
			break;

		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "IOP ADMA Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = iop_adma_tx_submit;
		INIT_LIST_HEAD(&slot->tx_list);
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		dma_desc = iop_chan->device->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * IOP_ADMA_SLOT_SIZE;
		slot->idx = idx;

		spin_lock_bh(&iop_chan->lock);
		iop_chan->slots_allocated++;
		list_add_tail(&slot->slot_node, &iop_chan->all_slots);
		spin_unlock_bh(&iop_chan->lock);
	} while (iop_chan->slots_allocated < num_descs_in_pool);

	if (idx && !iop_chan->last_used)
		iop_chan->last_used = list_entry(iop_chan->all_slots.next,
						 struct iop_adma_desc_slot,
						 slot_node);

	dev_dbg(iop_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		iop_chan->slots_allocated, iop_chan->last_used);

	/* initialize the channel and the chain with a null operation */
	if (init) {
		if (dma_has_cap(DMA_MEMCPY,
				iop_chan->device->common.cap_mask))
			iop_chan_start_null_memcpy(iop_chan);
		else if (dma_has_cap(DMA_XOR,
				     iop_chan->device->common.cap_mask))
			iop_chan_start_null_xor(iop_chan);
		else
			BUG();
	}

	return (idx > 0) ? idx : -ENOMEM;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_interrupt(grp_start, iop_chan);
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			 dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);

	dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
		__func__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memcpy(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
		      unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);

	dev_dbg(iop_chan->device->common.dev,
		"%s src_cnt: %d len: %zu flags: %lx\n",
		__func__, src_cnt, len, flags);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_xor(grp_start, src_cnt, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_xor_src_addr(grp_start, src_cnt,
						  dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
			  unsigned int src_cnt, size_t len, u32 *result,
			  unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;

	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
		__func__, src_cnt, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
		iop_desc_set_zero_sum_byte_count(grp_start, len);
		grp_start->xor_check_result = result;
		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
			 __func__, grp_start->xor_check_result);
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
						       dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
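
/*
 * iop_adma_prep_dma_pq - prepare a P+Q (RAID6 syndrome) descriptor
 * When a previous P/Q result is being continued, the old P and/or Q
 * buffers are appended to the source list with the appropriate
 * coefficients, per the dma_maxpq() rules in include/linux/dmaengine.h.
 */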
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf, size_t len,
		     unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *g;
	int slot_cnt, slots_per_op;
	int continue_srcs;

	if (unlikely(!len))
		return NULL;
	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);

	dev_dbg(iop_chan->device->common.dev,
		"%s src_cnt: %d len: %zu flags: %lx\n",
		__func__, src_cnt, len, flags);

	if (dmaf_p_disabled_continue(flags))
		continue_srcs = 1 + src_cnt;
	else if (dmaf_continue(flags))
		continue_srcs = 3 + src_cnt;
	else
		continue_srcs = 0 + src_cnt;

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		int i;

		g = sw_desc->group_head;
		iop_desc_set_byte_count(g, iop_chan, len);

		/* even if P is disabled its destination address (bits
		 * [3:0]) must match Q.  It is ok if P points to an
		 * invalid address, it won't be written.
		 */
		if (flags & DMA_PREP_PQ_DISABLE_P)
			dst[0] = dst[1] & 0x7;

		iop_desc_set_pq_addr(g, dst);
		sw_desc->async_tx.flags = flags;
		for (i = 0; i < src_cnt; i++)
			iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);

		/* if we are continuing a previous operation factor in
		 * the old p and q values, see the comment for dma_maxpq
		 * in include/linux/dmaengine.h
		 */
		if (dmaf_p_disabled_continue(flags))
			iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
		else if (dmaf_continue(flags)) {
			iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
			iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
			iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
		}
		iop_desc_init_pq(g, i, flags);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
			 unsigned int src_cnt, const unsigned char *scf,
			 size_t len, enum sum_check_flags *pqres,
			 unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *g;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);

	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
		__func__, src_cnt, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		/* for validate operations p and q are tagged onto the
		 * end of the source list
		 */
		int pq_idx = src_cnt;

		g = sw_desc->group_head;
		iop_desc_init_pq_zero_sum(g, src_cnt + 2, flags);
		iop_desc_set_pq_zero_sum_byte_count(g, len);
		g->pq_check_result = pqres;
		pr_debug("\t%s: g->pq_check_result: %p\n",
			 __func__, g->pq_check_result);
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
							  src[src_cnt],
							  scf[src_cnt]);
		iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void iop_adma_free_chan_resources(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	iop_adma_slot_cleanup(iop_chan);

	spin_lock_bh(&iop_chan->lock);
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		iop_chan->slots_allocated--;
	}
	iop_chan->last_used = NULL;

	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, iop_chan->slots_allocated);
	spin_unlock_bh(&iop_chan->lock);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
			in_use_descs - 1);
}

/**
 * iop_adma_status - poll the status of an ADMA transaction
 * @chan: ADMA channel handle
 * @cookie: ADMA transaction identifier
 * @txstate: a holder for the current state of the channel or NULL
 */
static enum dma_status iop_adma_status(struct dma_chan *chan,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	int ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	iop_adma_slot_cleanup(iop_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static irqreturn_t iop_adma_eot_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eot_status(chan);

	return IRQ_HANDLED;
}

static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eoc_status(chan);

	return IRQ_HANDLED;
}

static irqreturn_t iop_adma_err_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;
	unsigned long status = iop_chan_get_status(chan);

	dev_err(chan->device->common.dev,
		"error ( %s%s%s%s%s%s%s)\n",
		iop_is_err_int_parity(status, chan) ? "int_parity " : "",
		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
		iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
		iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
		iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
		iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
		iop_is_err_split_tx(status, chan) ? "split_tx " : "");

	iop_adma_device_clear_err_status(chan);

	BUG();

	return IRQ_HANDLED;
}

static void iop_adma_issue_pending(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);

	if (iop_chan->pending) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define IOP_ADMA_TEST_SIZE 2000

static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __func__);

	src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
	src_dma = dma_map_single(dma_chan->device->dev, src,
				 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				      IOP_ADMA_TEST_SIZE,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(1);

	if (iop_adma_status(dma_chan, cookie, NULL) !=
			DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
				IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */

static int
iop_adma_xor_val_self_test(struct iop_adma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 zero_sum_result;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __func__);

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_status(dma_chan, cookie, NULL) !=
		DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
				   PAGE_SIZE, DMA_TO_DEVICE);

	/* skip zero sum if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	/* zero sum the sources with the destination page */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		zero_sum_srcs[i] = xor_srcs[i];
	zero_sum_srcs[i] = dest;

	zero_sum_result = 1;

	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
				       &zero_sum_result,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_err(dma_chan->device->dev,
			"Self-test zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test for non-zero parity sum */
	zero_sum_result = 0;
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
				       &zero_sum_result,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test non-zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 1) {
		dev_err(dma_chan->device->dev,
			"Self-test non-zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	src_idx = IOP_ADMA_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

#ifdef CONFIG_RAID6_PQ
static int
iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
{
	/* combined sources, software pq results, and extra hw pq results */
	struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
	/* ptr to the extra hw pq buffers defined above */
	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
	/* address conversion buffers (dma_map / page_address) */
	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
	dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
	int i;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u32 zero_sum_result;
	int err = 0;
	struct device *dev;

	dev_dbg(device->common.dev, "%s\n", __func__);

	for (i = 0; i < ARRAY_SIZE(pq); i++) {
		pq[i] = alloc_page(GFP_KERNEL);
		if (!pq[i]) {
			while (i--)
				__free_page(pq[i]);
			return -ENOMEM;
		}
	}

	/* Fill in src buffers */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
		pq_sw[i] = page_address(pq[i]);
		memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
	}
	pq_sw[i] = page_address(pq[i]);
	pq_sw[i+1] = page_address(pq[i+1]);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dev = dma_chan->device->dev;

	/* initialize the dests */
	memset(page_address(pq_hw[0]), 0, PAGE_SIZE);
	memset(page_address(pq_hw[1]), 0, PAGE_SIZE);

	/* test pq */
	pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
	pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
				  IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
				  PAGE_SIZE,
				  DMA_PREP_INTERRUPT |
				  DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_status(dma_chan, cookie, NULL) !=
		DMA_COMPLETE) {
		dev_err(dev, "Self-test pq timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);

	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
		   page_address(pq_hw[0]), PAGE_SIZE) != 0) {
		dev_err(dev, "Self-test p failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
		   page_address(pq_hw[1]), PAGE_SIZE) != 0) {
		dev_err(dev, "Self-test q failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test correct zero sum using the software generated pq values */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	zero_sum_result = ~0;
	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
				      pq_src, IOP_ADMA_NUM_SRC_TEST,
				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_status(dma_chan, cookie, NULL) !=
		DMA_COMPLETE) {
		dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
			zero_sum_result);
		err = -ENODEV;
		goto free_resources;
	}

	/* test incorrect zero sum */
	i = IOP_ADMA_NUM_SRC_TEST;
	memset(pq_sw[i] + 100, 0, 100);
	memset(pq_sw[i+1] + 200, 0, 200);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	zero_sum_result = 0;
	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
				      pq_src, IOP_ADMA_NUM_SRC_TEST,
				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_status(dma_chan, cookie, NULL) !=
		DMA_COMPLETE) {
		dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
		dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
			zero_sum_result);
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	i = ARRAY_SIZE(pq);
	while (i--)
		__free_page(pq[i]);
	return err;
}
#endif

static int iop_adma_remove(struct platform_device *dev)
{
	struct iop_adma_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct iop_adma_chan *iop_chan;
	struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			  device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		iop_chan = to_iop_adma_chan(chan);
		list_del(&chan->device_node);
		kfree(iop_chan);
	}
	kfree(device);

	return 0;
}
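
/*
 * iop_adma_probe - set up one ADMA channel
 * Maps the channel registers, carves a coherent descriptor pool out of the
 * platform-supplied pool size, wires up the dmaengine callbacks for the
 * advertised capabilities, requests the EOT/EOC/error interrupts and runs
 * the relevant self-tests before registering with dmaengine.
 */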
static int iop_adma_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret = 0, i;
	struct iop_adma_device *adev;
	struct iop_adma_chan *iop_chan;
	struct dma_device *dma_dev;
	struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), pdev->name))
		return -EBUSY;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
						plat_data->pool_size,
						&adev->dma_desc_pool,
						GFP_KERNEL);
	if (!adev->dma_desc_pool_virt) {
		ret = -ENOMEM;
		goto err_free_adev;
	}

	dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %pad\n",
		__func__, adev->dma_desc_pool_virt, &adev->dma_desc_pool);

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;

	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
	dma_dev->device_tx_status = iop_adma_status;
	dma_dev->device_issue_pending = iop_adma_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = iop_adma_get_max_xor();
		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
	}
	if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
		dma_dev->device_prep_dma_xor_val =
			iop_adma_prep_dma_xor_val;
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
		dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
	}
	if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
		dma_dev->device_prep_dma_pq_val =
			iop_adma_prep_dma_pq_val;
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt =
			iop_adma_prep_dma_interrupt;

	iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
	if (!iop_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	iop_chan->device = adev;

	iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
					  resource_size(res));
	if (!iop_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_iop_chan;
	}
	tasklet_setup(&iop_chan->irq_tasklet, iop_adma_tasklet);

	/* clear errors before enabling interrupts */
	iop_adma_device_clear_err_status(iop_chan);

	for (i = 0; i < 3; i++) {
		static const irq_handler_t handler[] = {
			iop_adma_eot_handler,
			iop_adma_eoc_handler,
			iop_adma_err_handler
		};
		int irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = -ENXIO;
			goto err_free_iop_chan;
		} else {
			ret = devm_request_irq(&pdev->dev, irq,
					       handler[i], 0, pdev->name, iop_chan);
			if (ret)
				goto err_free_iop_chan;
		}
	}

	spin_lock_init(&iop_chan->lock);
	INIT_LIST_HEAD(&iop_chan->chain);
	INIT_LIST_HEAD(&iop_chan->all_slots);
	iop_chan->common.device = dma_dev;
	dma_cookie_init(&iop_chan->common);
	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = iop_adma_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = iop_adma_xor_val_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
	    dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
#ifdef CONFIG_RAID6_PQ
		ret = iop_adma_pq_zero_sum_self_test(adev);
		dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
#else
		/* can not test raid6, so do not publish capability */
		dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
		dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
		ret = 0;
#endif
		if (ret)
			goto err_free_iop_chan;
	}

	dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
		 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

err_free_iop_chan:
	kfree(iop_chan);
err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			  adev->dma_desc_pool_virt, adev->dma_desc_pool);
err_free_adev:
	kfree(adev);
out:
	return ret;
}
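
/*
 * The two helpers below seed a freshly initialized channel with a harmless
 * zero-length "null" operation (memcpy or xor, depending on capability) so
 * that the hardware descriptor chain always has a tail to append to.
 */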
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;

		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
		async_tx_ack(&sw_desc->async_tx);
		iop_desc_init_memcpy(grp_start, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_memcpy_src_addr(grp_start, 0);

		cookie = dma_cookie_assign(&sw_desc->async_tx);

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->common.completed_cookie = cookie - 1;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_err(iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}

static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
		async_tx_ack(&sw_desc->async_tx);
		iop_desc_init_null_xor(grp_start, 2, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_xor_src_addr(grp_start, 0, 0);
		iop_desc_set_xor_src_addr(grp_start, 1, 0);

		cookie = dma_cookie_assign(&sw_desc->async_tx);

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->common.completed_cookie = cookie - 1;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_err(iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}

static struct platform_driver iop_adma_driver = {
	.probe		= iop_adma_probe,
	.remove		= iop_adma_remove,
	.driver		= {
		.name	= "iop-adma",
	},
};

module_platform_driver(iop_adma_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:iop-adma");