bcm-sba-raid.c 48 KB

  1. /*
  2. * Copyright (C) 2017 Broadcom
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as
  6. * published by the Free Software Foundation version 2.
  7. *
  8. * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  9. * kind, whether express or implied; without even the implied warranty
  10. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. /*
  14. * Broadcom SBA RAID Driver
  15. *
  16. * The Broadcom stream buffer accelerator (SBA) provides offloading
  17. * capabilities for RAID operations. The SBA offload engine is accessible
  18. * via the Broadcom SoC specific ring manager. Two or more offload engines
  19. * can share the same ring manager; for this reason, the Broadcom SoC
  20. * specific ring manager driver is implemented as a mailbox controller
  21. * driver and the offload engine drivers are implemented as mailbox clients.
  22. *
  23. * Typically, a Broadcom SoC specific ring manager implements a large
  24. * number of hardware rings over one or more SBA hardware devices. By
  25. * design, the internal buffer size of an SBA hardware device is limited,
  26. * but all offload operations supported by SBA can be broken down into
  27. * multiple small-sized requests and executed in parallel on multiple SBA
  28. * hardware devices to achieve high throughput.
  29. *
  30. * The Broadcom SBA RAID driver does not require any register programming
  31. * except for submitting requests to the SBA hardware device via mailbox
  32. * channels. This driver implements a DMA device with one DMA channel using
  33. * a single mailbox channel provided by the Broadcom SoC specific ring
  34. * manager driver. To have more SBA DMA channels, we can create more SBA
  35. * device nodes in the Broadcom SoC specific DTS based on the number of
  36. * hardware rings supported by the Broadcom SoC ring manager.
  37. */
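/*
 * Illustrative sketch only (not taken from this driver or its DT binding
 * document): an SBA device node in the SoC DTS references the ring manager
 * through the standard "mboxes" property, roughly as below. The "ringmgr"
 * label and the mailbox cell values are hypothetical placeholders.
 *
 *	raid_dma: raid-dma@0 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&ringmgr 0>;
 *	};
 *
 * Instantiating more such nodes, each bound to its own mailbox channel,
 * is how additional SBA DMA channels would be exposed, as noted above.
 */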
  38. #include <linux/bitops.h>
  39. #include <linux/debugfs.h>
  40. #include <linux/dma-mapping.h>
  41. #include <linux/dmaengine.h>
  42. #include <linux/list.h>
  43. #include <linux/mailbox_client.h>
  44. #include <linux/mailbox/brcm-message.h>
  45. #include <linux/module.h>
  46. #include <linux/of_device.h>
  47. #include <linux/slab.h>
  48. #include <linux/raid/pq.h>
  49. #include "dmaengine.h"
  50. /* ====== Driver macros and defines ===== */
  51. #define SBA_TYPE_SHIFT 48
  52. #define SBA_TYPE_MASK GENMASK(1, 0)
  53. #define SBA_TYPE_A 0x0
  54. #define SBA_TYPE_B 0x2
  55. #define SBA_TYPE_C 0x3
  56. #define SBA_USER_DEF_SHIFT 32
  57. #define SBA_USER_DEF_MASK GENMASK(15, 0)
  58. #define SBA_R_MDATA_SHIFT 24
  59. #define SBA_R_MDATA_MASK GENMASK(7, 0)
  60. #define SBA_C_MDATA_MS_SHIFT 18
  61. #define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
  62. #define SBA_INT_SHIFT 17
  63. #define SBA_INT_MASK BIT(0)
  64. #define SBA_RESP_SHIFT 16
  65. #define SBA_RESP_MASK BIT(0)
  66. #define SBA_C_MDATA_SHIFT 8
  67. #define SBA_C_MDATA_MASK GENMASK(7, 0)
  68. #define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
  69. #define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
  70. #define SBA_C_MDATA_DNUM_SHIFT 5
  71. #define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
  72. #define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
  73. #define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
  74. #define SBA_CMD_SHIFT 0
  75. #define SBA_CMD_MASK GENMASK(3, 0)
  76. #define SBA_CMD_ZERO_BUFFER 0x4
  77. #define SBA_CMD_ZERO_ALL_BUFFERS 0x8
  78. #define SBA_CMD_LOAD_BUFFER 0x9
  79. #define SBA_CMD_XOR 0xa
  80. #define SBA_CMD_GALOIS_XOR 0xb
  81. #define SBA_CMD_WRITE_BUFFER 0xc
  82. #define SBA_CMD_GALOIS 0xe
  83. #define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
  84. #define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8
  85. /* Driver helper macros */
  86. #define to_sba_request(tx) \
  87. container_of(tx, struct sba_request, tx)
  88. #define to_sba_device(dchan) \
  89. container_of(dchan, struct sba_device, dma_chan)
  90. /* ===== Driver data structures ===== */
  91. enum sba_request_flags {
  92. SBA_REQUEST_STATE_FREE = 0x001,
  93. SBA_REQUEST_STATE_ALLOCED = 0x002,
  94. SBA_REQUEST_STATE_PENDING = 0x004,
  95. SBA_REQUEST_STATE_ACTIVE = 0x008,
  96. SBA_REQUEST_STATE_ABORTED = 0x010,
  97. SBA_REQUEST_STATE_MASK = 0x0ff,
  98. SBA_REQUEST_FENCE = 0x100,
  99. };
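/*
 * Request lifecycle (as implemented by the helpers below): a request taken
 * from the free list becomes ALLOCED in sba_alloc_request(), moves to
 * PENDING in sba_tx_submit(), becomes ACTIVE when picked for submission to
 * the mailbox channel, and returns to FREE once its response is received.
 * Active requests are moved to ABORTED during channel cleanup and are freed
 * later when their responses arrive.
 *
 *	FREE -> ALLOCED -> PENDING -> ACTIVE -> FREE
 *	                                  \-> ABORTED -> FREE
 */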
  100. struct sba_request {
  101. /* Global state */
  102. struct list_head node;
  103. struct sba_device *sba;
  104. u32 flags;
  105. /* Chained requests management */
  106. struct sba_request *first;
  107. struct list_head next;
  108. atomic_t next_pending_count;
  109. /* BRCM message data */
  110. struct brcm_message msg;
  111. struct dma_async_tx_descriptor tx;
  112. /* SBA commands */
  113. struct brcm_sba_command cmds[];
  114. };
  115. enum sba_version {
  116. SBA_VER_1 = 0,
  117. SBA_VER_2
  118. };
  119. struct sba_device {
  120. /* Underlying device */
  121. struct device *dev;
  122. /* DT configuration parameters */
  123. enum sba_version ver;
  124. /* Derived configuration parameters */
  125. u32 max_req;
  126. u32 hw_buf_size;
  127. u32 hw_resp_size;
  128. u32 max_pq_coefs;
  129. u32 max_pq_srcs;
  130. u32 max_cmd_per_req;
  131. u32 max_xor_srcs;
  132. u32 max_resp_pool_size;
  133. u32 max_cmds_pool_size;
  134. /* Mailbox client and mailbox channels */
  135. struct mbox_client client;
  136. struct mbox_chan *mchan;
  137. struct device *mbox_dev;
  138. /* DMA device and DMA channel */
  139. struct dma_device dma_dev;
  140. struct dma_chan dma_chan;
  141. /* DMA channel resources */
  142. void *resp_base;
  143. dma_addr_t resp_dma_base;
  144. void *cmds_base;
  145. dma_addr_t cmds_dma_base;
  146. spinlock_t reqs_lock;
  147. bool reqs_fence;
  148. struct list_head reqs_alloc_list;
  149. struct list_head reqs_pending_list;
  150. struct list_head reqs_active_list;
  151. struct list_head reqs_aborted_list;
  152. struct list_head reqs_free_list;
  153. /* DebugFS directory entries */
  154. struct dentry *root;
  155. };
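/*
 * Layout of the pre-allocated per-channel pools (filled in by
 * sba_prealloc_channel_resources() below): the response pool provides one
 * hw_resp_size slot per request at resp_dma_base + i * hw_resp_size, and
 * the command pool provides max_cmd_per_req 64-bit command words per
 * request at cmds_dma_base + (i * max_cmd_per_req + j) * sizeof(u64).
 */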
  156. /* ====== Command helper routines ===== */
  157. static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
  158. {
  159. cmd &= ~((u64)mask << shift);
  160. cmd |= ((u64)(val & mask) << shift);
  161. return cmd;
  162. }
  163. static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
  164. {
  165. return b0 & SBA_C_MDATA_BNUMx_MASK;
  166. }
  167. static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
  168. {
  169. return b0 & SBA_C_MDATA_BNUMx_MASK;
  170. }
  171. static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
  172. {
  173. return (b0 & SBA_C_MDATA_BNUMx_MASK) |
  174. ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
  175. }
  176. static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
  177. {
  178. return (b0 & SBA_C_MDATA_BNUMx_MASK) |
  179. ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
  180. ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
  181. }
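/*
 * Worked example (mirroring sba_fillup_memcpy_msg() below): a Type-B
 * LOAD_BUFFER command targeting buffer 0 is built by successive
 * sba_cmd_enc() calls, each packing one field at its shift/mask:
 *
 *	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 *	cmd = sba_cmd_enc(cmd, msg_len,
 *			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 *	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(sba_cmd_load_c_mdata(0)),
 *			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 *	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
 *			  SBA_CMD_SHIFT, SBA_CMD_MASK);
 */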
  182. /* ====== General helper routines ===== */
  183. static struct sba_request *sba_alloc_request(struct sba_device *sba)
  184. {
  185. bool found = false;
  186. unsigned long flags;
  187. struct sba_request *req = NULL;
  188. spin_lock_irqsave(&sba->reqs_lock, flags);
  189. list_for_each_entry(req, &sba->reqs_free_list, node) {
  190. if (async_tx_test_ack(&req->tx)) {
  191. list_move_tail(&req->node, &sba->reqs_alloc_list);
  192. found = true;
  193. break;
  194. }
  195. }
  196. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  197. if (!found) {
  198. /*
  199. * We have no more free requests, so we peek the
  200. * mailbox channel hoping that a few active requests
  201. * have completed, which would create room for new
  202. * requests.
  203. */
  204. mbox_client_peek_data(sba->mchan);
  205. return NULL;
  206. }
  207. req->flags = SBA_REQUEST_STATE_ALLOCED;
  208. req->first = req;
  209. INIT_LIST_HEAD(&req->next);
  210. atomic_set(&req->next_pending_count, 1);
  211. dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
  212. async_tx_ack(&req->tx);
  213. return req;
  214. }
  215. /* Note: Must be called with sba->reqs_lock held */
  216. static void _sba_pending_request(struct sba_device *sba,
  217. struct sba_request *req)
  218. {
  219. lockdep_assert_held(&sba->reqs_lock);
  220. req->flags &= ~SBA_REQUEST_STATE_MASK;
  221. req->flags |= SBA_REQUEST_STATE_PENDING;
  222. list_move_tail(&req->node, &sba->reqs_pending_list);
  223. if (list_empty(&sba->reqs_active_list))
  224. sba->reqs_fence = false;
  225. }
  226. /* Note: Must be called with sba->reqs_lock held */
  227. static bool _sba_active_request(struct sba_device *sba,
  228. struct sba_request *req)
  229. {
  230. lockdep_assert_held(&sba->reqs_lock);
  231. if (list_empty(&sba->reqs_active_list))
  232. sba->reqs_fence = false;
  233. if (sba->reqs_fence)
  234. return false;
  235. req->flags &= ~SBA_REQUEST_STATE_MASK;
  236. req->flags |= SBA_REQUEST_STATE_ACTIVE;
  237. list_move_tail(&req->node, &sba->reqs_active_list);
  238. if (req->flags & SBA_REQUEST_FENCE)
  239. sba->reqs_fence = true;
  240. return true;
  241. }
  242. /* Note: Must be called with sba->reqs_lock held */
  243. static void _sba_abort_request(struct sba_device *sba,
  244. struct sba_request *req)
  245. {
  246. lockdep_assert_held(&sba->reqs_lock);
  247. req->flags &= ~SBA_REQUEST_STATE_MASK;
  248. req->flags |= SBA_REQUEST_STATE_ABORTED;
  249. list_move_tail(&req->node, &sba->reqs_aborted_list);
  250. if (list_empty(&sba->reqs_active_list))
  251. sba->reqs_fence = false;
  252. }
  253. /* Note: Must be called with sba->reqs_lock held */
  254. static void _sba_free_request(struct sba_device *sba,
  255. struct sba_request *req)
  256. {
  257. lockdep_assert_held(&sba->reqs_lock);
  258. req->flags &= ~SBA_REQUEST_STATE_MASK;
  259. req->flags |= SBA_REQUEST_STATE_FREE;
  260. list_move_tail(&req->node, &sba->reqs_free_list);
  261. if (list_empty(&sba->reqs_active_list))
  262. sba->reqs_fence = false;
  263. }
  264. static void sba_free_chained_requests(struct sba_request *req)
  265. {
  266. unsigned long flags;
  267. struct sba_request *nreq;
  268. struct sba_device *sba = req->sba;
  269. spin_lock_irqsave(&sba->reqs_lock, flags);
  270. _sba_free_request(sba, req);
  271. list_for_each_entry(nreq, &req->next, next)
  272. _sba_free_request(sba, nreq);
  273. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  274. }
  275. static void sba_chain_request(struct sba_request *first,
  276. struct sba_request *req)
  277. {
  278. unsigned long flags;
  279. struct sba_device *sba = req->sba;
  280. spin_lock_irqsave(&sba->reqs_lock, flags);
  281. list_add_tail(&req->next, &first->next);
  282. req->first = first;
  283. atomic_inc(&first->next_pending_count);
  284. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  285. }
  286. static void sba_cleanup_nonpending_requests(struct sba_device *sba)
  287. {
  288. unsigned long flags;
  289. struct sba_request *req, *req1;
  290. spin_lock_irqsave(&sba->reqs_lock, flags);
  291. /* Free up all allocated requests */
  292. list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
  293. _sba_free_request(sba, req);
  294. /* Set all active requests as aborted */
  295. list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
  296. _sba_abort_request(sba, req);
  297. /*
  298. * Note: We expect that aborted requests will eventually be
  299. * freed by sba_receive_message()
  300. */
  301. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  302. }
  303. static void sba_cleanup_pending_requests(struct sba_device *sba)
  304. {
  305. unsigned long flags;
  306. struct sba_request *req, *req1;
  307. spin_lock_irqsave(&sba->reqs_lock, flags);
  308. /* Free up all pending requests */
  309. list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
  310. _sba_free_request(sba, req);
  311. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  312. }
  313. static int sba_send_mbox_request(struct sba_device *sba,
  314. struct sba_request *req)
  315. {
  316. int ret = 0;
  317. /* Send message for the request */
  318. req->msg.error = 0;
  319. ret = mbox_send_message(sba->mchan, &req->msg);
  320. if (ret < 0) {
  321. dev_err(sba->dev, "send message failed with error %d", ret);
  322. return ret;
  323. }
  324. /* Check error returned by mailbox controller */
  325. ret = req->msg.error;
  326. if (ret < 0) {
  327. dev_err(sba->dev, "message error %d", ret);
  328. }
  329. /* Signal txdone for mailbox channel */
  330. mbox_client_txdone(sba->mchan, ret);
  331. return ret;
  332. }
  333. /* Note: Must be called with sba->reqs_lock held */
  334. static void _sba_process_pending_requests(struct sba_device *sba)
  335. {
  336. int ret;
  337. u32 count;
  338. struct sba_request *req;
  339. /* Process a few pending requests */
  340. count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
  341. while (!list_empty(&sba->reqs_pending_list) && count) {
  342. /* Get the first pending request */
  343. req = list_first_entry(&sba->reqs_pending_list,
  344. struct sba_request, node);
  345. /* Try to make request active */
  346. if (!_sba_active_request(sba, req))
  347. break;
  348. /* Send request to mailbox channel */
  349. ret = sba_send_mbox_request(sba, req);
  350. if (ret < 0) {
  351. _sba_pending_request(sba, req);
  352. break;
  353. }
  354. count--;
  355. }
  356. }
  357. static void sba_process_received_request(struct sba_device *sba,
  358. struct sba_request *req)
  359. {
  360. unsigned long flags;
  361. struct dma_async_tx_descriptor *tx;
  362. struct sba_request *nreq, *first = req->first;
  363. /* Process only after all chained requests are received */
  364. if (!atomic_dec_return(&first->next_pending_count)) {
  365. tx = &first->tx;
  366. WARN_ON(tx->cookie < 0);
  367. if (tx->cookie > 0) {
  368. spin_lock_irqsave(&sba->reqs_lock, flags);
  369. dma_cookie_complete(tx);
  370. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  371. dmaengine_desc_get_callback_invoke(tx, NULL);
  372. dma_descriptor_unmap(tx);
  373. tx->callback = NULL;
  374. tx->callback_result = NULL;
  375. }
  376. dma_run_dependencies(tx);
  377. spin_lock_irqsave(&sba->reqs_lock, flags);
  378. /* Free all requests chained to first request */
  379. list_for_each_entry(nreq, &first->next, next)
  380. _sba_free_request(sba, nreq);
  381. INIT_LIST_HEAD(&first->next);
  382. /* Free the first request */
  383. _sba_free_request(sba, first);
  384. /* Process pending requests */
  385. _sba_process_pending_requests(sba);
  386. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  387. }
  388. }
  389. static void sba_write_stats_in_seqfile(struct sba_device *sba,
  390. struct seq_file *file)
  391. {
  392. unsigned long flags;
  393. struct sba_request *req;
  394. u32 free_count = 0, alloced_count = 0;
  395. u32 pending_count = 0, active_count = 0, aborted_count = 0;
  396. spin_lock_irqsave(&sba->reqs_lock, flags);
  397. list_for_each_entry(req, &sba->reqs_free_list, node)
  398. if (async_tx_test_ack(&req->tx))
  399. free_count++;
  400. list_for_each_entry(req, &sba->reqs_alloc_list, node)
  401. alloced_count++;
  402. list_for_each_entry(req, &sba->reqs_pending_list, node)
  403. pending_count++;
  404. list_for_each_entry(req, &sba->reqs_active_list, node)
  405. active_count++;
  406. list_for_each_entry(req, &sba->reqs_aborted_list, node)
  407. aborted_count++;
  408. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  409. seq_printf(file, "maximum requests = %d\n", sba->max_req);
  410. seq_printf(file, "free requests = %d\n", free_count);
  411. seq_printf(file, "alloced requests = %d\n", alloced_count);
  412. seq_printf(file, "pending requests = %d\n", pending_count);
  413. seq_printf(file, "active requests = %d\n", active_count);
  414. seq_printf(file, "aborted requests = %d\n", aborted_count);
  415. }
  416. /* ====== DMAENGINE callbacks ===== */
  417. static void sba_free_chan_resources(struct dma_chan *dchan)
  418. {
  419. /*
  420. * Channel resources are pre-allocated, so we just free up
  421. * whatever we can so that the pre-allocated channel
  422. * resources can be reused next time.
  423. */
  424. sba_cleanup_nonpending_requests(to_sba_device(dchan));
  425. }
  426. static int sba_device_terminate_all(struct dma_chan *dchan)
  427. {
  428. /* Cleanup all pending requests */
  429. sba_cleanup_pending_requests(to_sba_device(dchan));
  430. return 0;
  431. }
  432. static void sba_issue_pending(struct dma_chan *dchan)
  433. {
  434. unsigned long flags;
  435. struct sba_device *sba = to_sba_device(dchan);
  436. /* Process pending requests */
  437. spin_lock_irqsave(&sba->reqs_lock, flags);
  438. _sba_process_pending_requests(sba);
  439. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  440. }
  441. static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
  442. {
  443. unsigned long flags;
  444. dma_cookie_t cookie;
  445. struct sba_device *sba;
  446. struct sba_request *req, *nreq;
  447. if (unlikely(!tx))
  448. return -EINVAL;
  449. sba = to_sba_device(tx->chan);
  450. req = to_sba_request(tx);
  451. /* Assign cookie and mark all chained requests pending */
  452. spin_lock_irqsave(&sba->reqs_lock, flags);
  453. cookie = dma_cookie_assign(tx);
  454. _sba_pending_request(sba, req);
  455. list_for_each_entry(nreq, &req->next, next)
  456. _sba_pending_request(sba, nreq);
  457. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  458. return cookie;
  459. }
  460. static enum dma_status sba_tx_status(struct dma_chan *dchan,
  461. dma_cookie_t cookie,
  462. struct dma_tx_state *txstate)
  463. {
  464. enum dma_status ret;
  465. struct sba_device *sba = to_sba_device(dchan);
  466. ret = dma_cookie_status(dchan, cookie, txstate);
  467. if (ret == DMA_COMPLETE)
  468. return ret;
  469. mbox_client_peek_data(sba->mchan);
  470. return dma_cookie_status(dchan, cookie, txstate);
  471. }
  472. static void sba_fillup_interrupt_msg(struct sba_request *req,
  473. struct brcm_sba_command *cmds,
  474. struct brcm_message *msg)
  475. {
  476. u64 cmd;
  477. u32 c_mdata;
  478. dma_addr_t resp_dma = req->tx.phys;
  479. struct brcm_sba_command *cmdsp = cmds;
  480. /* Type-B command to load dummy data into buf0 */
  481. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  482. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  483. cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
  484. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  485. c_mdata = sba_cmd_load_c_mdata(0);
  486. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  487. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  488. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  489. SBA_CMD_SHIFT, SBA_CMD_MASK);
  490. cmdsp->cmd = cmd;
  491. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  492. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  493. cmdsp->data = resp_dma;
  494. cmdsp->data_len = req->sba->hw_resp_size;
  495. cmdsp++;
  496. /* Type-A command to write buf0 to dummy location */
  497. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  498. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  499. cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
  500. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  501. cmd = sba_cmd_enc(cmd, 0x1,
  502. SBA_RESP_SHIFT, SBA_RESP_MASK);
  503. c_mdata = sba_cmd_write_c_mdata(0);
  504. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  505. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  506. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  507. SBA_CMD_SHIFT, SBA_CMD_MASK);
  508. cmdsp->cmd = cmd;
  509. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  510. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  511. if (req->sba->hw_resp_size) {
  512. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  513. cmdsp->resp = resp_dma;
  514. cmdsp->resp_len = req->sba->hw_resp_size;
  515. }
  516. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  517. cmdsp->data = resp_dma;
  518. cmdsp->data_len = req->sba->hw_resp_size;
  519. cmdsp++;
  520. /* Fillup brcm_message */
  521. msg->type = BRCM_MESSAGE_SBA;
  522. msg->sba.cmds = cmds;
  523. msg->sba.cmds_count = cmdsp - cmds;
  524. msg->ctx = req;
  525. msg->error = 0;
  526. }
  527. static struct dma_async_tx_descriptor *
  528. sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
  529. {
  530. struct sba_request *req = NULL;
  531. struct sba_device *sba = to_sba_device(dchan);
  532. /* Alloc new request */
  533. req = sba_alloc_request(sba);
  534. if (!req)
  535. return NULL;
  536. /*
  537. * Force fence so that no requests are submitted
  538. * until DMA callback for this request is invoked.
  539. */
  540. req->flags |= SBA_REQUEST_FENCE;
  541. /* Fillup request message */
  542. sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
  543. /* Init async_tx descriptor */
  544. req->tx.flags = flags;
  545. req->tx.cookie = -EBUSY;
  546. return &req->tx;
  547. }
  548. static void sba_fillup_memcpy_msg(struct sba_request *req,
  549. struct brcm_sba_command *cmds,
  550. struct brcm_message *msg,
  551. dma_addr_t msg_offset, size_t msg_len,
  552. dma_addr_t dst, dma_addr_t src)
  553. {
  554. u64 cmd;
  555. u32 c_mdata;
  556. dma_addr_t resp_dma = req->tx.phys;
  557. struct brcm_sba_command *cmdsp = cmds;
  558. /* Type-B command to load data into buf0 */
  559. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  560. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  561. cmd = sba_cmd_enc(cmd, msg_len,
  562. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  563. c_mdata = sba_cmd_load_c_mdata(0);
  564. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  565. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  566. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  567. SBA_CMD_SHIFT, SBA_CMD_MASK);
  568. cmdsp->cmd = cmd;
  569. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  570. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  571. cmdsp->data = src + msg_offset;
  572. cmdsp->data_len = msg_len;
  573. cmdsp++;
  574. /* Type-A command to write buf0 */
  575. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  576. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  577. cmd = sba_cmd_enc(cmd, msg_len,
  578. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  579. cmd = sba_cmd_enc(cmd, 0x1,
  580. SBA_RESP_SHIFT, SBA_RESP_MASK);
  581. c_mdata = sba_cmd_write_c_mdata(0);
  582. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  583. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  584. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  585. SBA_CMD_SHIFT, SBA_CMD_MASK);
  586. cmdsp->cmd = cmd;
  587. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  588. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  589. if (req->sba->hw_resp_size) {
  590. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  591. cmdsp->resp = resp_dma;
  592. cmdsp->resp_len = req->sba->hw_resp_size;
  593. }
  594. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  595. cmdsp->data = dst + msg_offset;
  596. cmdsp->data_len = msg_len;
  597. cmdsp++;
  598. /* Fillup brcm_message */
  599. msg->type = BRCM_MESSAGE_SBA;
  600. msg->sba.cmds = cmds;
  601. msg->sba.cmds_count = cmdsp - cmds;
  602. msg->ctx = req;
  603. msg->error = 0;
  604. }
  605. static struct sba_request *
  606. sba_prep_dma_memcpy_req(struct sba_device *sba,
  607. dma_addr_t off, dma_addr_t dst, dma_addr_t src,
  608. size_t len, unsigned long flags)
  609. {
  610. struct sba_request *req = NULL;
  611. /* Alloc new request */
  612. req = sba_alloc_request(sba);
  613. if (!req)
  614. return NULL;
  615. if (flags & DMA_PREP_FENCE)
  616. req->flags |= SBA_REQUEST_FENCE;
  617. /* Fillup request message */
  618. sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
  619. off, len, dst, src);
  620. /* Init async_tx descriptor */
  621. req->tx.flags = flags;
  622. req->tx.cookie = -EBUSY;
  623. return req;
  624. }
  625. static struct dma_async_tx_descriptor *
  626. sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
  627. size_t len, unsigned long flags)
  628. {
  629. size_t req_len;
  630. dma_addr_t off = 0;
  631. struct sba_device *sba = to_sba_device(dchan);
  632. struct sba_request *first = NULL, *req;
  633. /* Create chained requests where each request is up to hw_buf_size */
  634. while (len) {
  635. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  636. req = sba_prep_dma_memcpy_req(sba, off, dst, src,
  637. req_len, flags);
  638. if (!req) {
  639. if (first)
  640. sba_free_chained_requests(first);
  641. return NULL;
  642. }
  643. if (first)
  644. sba_chain_request(first, req);
  645. else
  646. first = req;
  647. off += req_len;
  648. len -= req_len;
  649. }
  650. return (first) ? &first->tx : NULL;
  651. }
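/*
 * Minimal usage sketch (not part of this driver): a generic dmaengine
 * client could exercise the memcpy capability exposed by
 * sba_prep_dma_memcpy() above via the standard dmaengine client API.
 * dst_dma, src_dma and len stand for caller-provided DMA addresses and
 * length; error handling is omitted.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 */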
  652. static void sba_fillup_xor_msg(struct sba_request *req,
  653. struct brcm_sba_command *cmds,
  654. struct brcm_message *msg,
  655. dma_addr_t msg_offset, size_t msg_len,
  656. dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
  657. {
  658. u64 cmd;
  659. u32 c_mdata;
  660. unsigned int i;
  661. dma_addr_t resp_dma = req->tx.phys;
  662. struct brcm_sba_command *cmdsp = cmds;
  663. /* Type-B command to load data into buf0 */
  664. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  665. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  666. cmd = sba_cmd_enc(cmd, msg_len,
  667. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  668. c_mdata = sba_cmd_load_c_mdata(0);
  669. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  670. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  671. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  672. SBA_CMD_SHIFT, SBA_CMD_MASK);
  673. cmdsp->cmd = cmd;
  674. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  675. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  676. cmdsp->data = src[0] + msg_offset;
  677. cmdsp->data_len = msg_len;
  678. cmdsp++;
  679. /* Type-B commands to xor data with buf0 and put it back in buf0 */
  680. for (i = 1; i < src_cnt; i++) {
  681. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  682. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  683. cmd = sba_cmd_enc(cmd, msg_len,
  684. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  685. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  686. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  687. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  688. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  689. SBA_CMD_SHIFT, SBA_CMD_MASK);
  690. cmdsp->cmd = cmd;
  691. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  692. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  693. cmdsp->data = src[i] + msg_offset;
  694. cmdsp->data_len = msg_len;
  695. cmdsp++;
  696. }
  697. /* Type-A command to write buf0 */
  698. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  699. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  700. cmd = sba_cmd_enc(cmd, msg_len,
  701. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  702. cmd = sba_cmd_enc(cmd, 0x1,
  703. SBA_RESP_SHIFT, SBA_RESP_MASK);
  704. c_mdata = sba_cmd_write_c_mdata(0);
  705. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  706. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  707. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  708. SBA_CMD_SHIFT, SBA_CMD_MASK);
  709. cmdsp->cmd = cmd;
  710. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  711. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  712. if (req->sba->hw_resp_size) {
  713. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  714. cmdsp->resp = resp_dma;
  715. cmdsp->resp_len = req->sba->hw_resp_size;
  716. }
  717. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  718. cmdsp->data = dst + msg_offset;
  719. cmdsp->data_len = msg_len;
  720. cmdsp++;
  721. /* Fillup brcm_message */
  722. msg->type = BRCM_MESSAGE_SBA;
  723. msg->sba.cmds = cmds;
  724. msg->sba.cmds_count = cmdsp - cmds;
  725. msg->ctx = req;
  726. msg->error = 0;
  727. }
  728. static struct sba_request *
  729. sba_prep_dma_xor_req(struct sba_device *sba,
  730. dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
  731. u32 src_cnt, size_t len, unsigned long flags)
  732. {
  733. struct sba_request *req = NULL;
  734. /* Alloc new request */
  735. req = sba_alloc_request(sba);
  736. if (!req)
  737. return NULL;
  738. if (flags & DMA_PREP_FENCE)
  739. req->flags |= SBA_REQUEST_FENCE;
  740. /* Fillup request message */
  741. sba_fillup_xor_msg(req, req->cmds, &req->msg,
  742. off, len, dst, src, src_cnt);
  743. /* Init async_tx descriptor */
  744. req->tx.flags = flags;
  745. req->tx.cookie = -EBUSY;
  746. return req;
  747. }
  748. static struct dma_async_tx_descriptor *
  749. sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
  750. u32 src_cnt, size_t len, unsigned long flags)
  751. {
  752. size_t req_len;
  753. dma_addr_t off = 0;
  754. struct sba_device *sba = to_sba_device(dchan);
  755. struct sba_request *first = NULL, *req;
  756. /* Sanity checks */
  757. if (unlikely(src_cnt > sba->max_xor_srcs))
  758. return NULL;
  759. /* Create chained requests where each request is up to hw_buf_size */
  760. while (len) {
  761. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  762. req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
  763. req_len, flags);
  764. if (!req) {
  765. if (first)
  766. sba_free_chained_requests(first);
  767. return NULL;
  768. }
  769. if (first)
  770. sba_chain_request(first, req);
  771. else
  772. first = req;
  773. off += req_len;
  774. len -= req_len;
  775. }
  776. return (first) ? &first->tx : NULL;
  777. }
  778. static void sba_fillup_pq_msg(struct sba_request *req,
  779. bool pq_continue,
  780. struct brcm_sba_command *cmds,
  781. struct brcm_message *msg,
  782. dma_addr_t msg_offset, size_t msg_len,
  783. dma_addr_t *dst_p, dma_addr_t *dst_q,
  784. const u8 *scf, dma_addr_t *src, u32 src_cnt)
  785. {
  786. u64 cmd;
  787. u32 c_mdata;
  788. unsigned int i;
  789. dma_addr_t resp_dma = req->tx.phys;
  790. struct brcm_sba_command *cmdsp = cmds;
  791. if (pq_continue) {
  792. /* Type-B command to load old P into buf0 */
  793. if (dst_p) {
  794. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  795. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  796. cmd = sba_cmd_enc(cmd, msg_len,
  797. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  798. c_mdata = sba_cmd_load_c_mdata(0);
  799. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  800. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  801. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  802. SBA_CMD_SHIFT, SBA_CMD_MASK);
  803. cmdsp->cmd = cmd;
  804. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  805. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  806. cmdsp->data = *dst_p + msg_offset;
  807. cmdsp->data_len = msg_len;
  808. cmdsp++;
  809. }
  810. /* Type-B command to load old Q into buf1 */
  811. if (dst_q) {
  812. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  813. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  814. cmd = sba_cmd_enc(cmd, msg_len,
  815. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  816. c_mdata = sba_cmd_load_c_mdata(1);
  817. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  818. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  819. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  820. SBA_CMD_SHIFT, SBA_CMD_MASK);
  821. cmdsp->cmd = cmd;
  822. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  823. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  824. cmdsp->data = *dst_q + msg_offset;
  825. cmdsp->data_len = msg_len;
  826. cmdsp++;
  827. }
  828. } else {
  829. /* Type-A command to zero all buffers */
  830. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  831. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  832. cmd = sba_cmd_enc(cmd, msg_len,
  833. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  834. cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
  835. SBA_CMD_SHIFT, SBA_CMD_MASK);
  836. cmdsp->cmd = cmd;
  837. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  838. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  839. cmdsp++;
  840. }
  841. /* Type-B commands to generate P into buf0 and Q into buf1 */
  842. for (i = 0; i < src_cnt; i++) {
  843. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  844. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  845. cmd = sba_cmd_enc(cmd, msg_len,
  846. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  847. c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
  848. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  849. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  850. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  851. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  852. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
  853. SBA_CMD_SHIFT, SBA_CMD_MASK);
  854. cmdsp->cmd = cmd;
  855. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  856. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  857. cmdsp->data = src[i] + msg_offset;
  858. cmdsp->data_len = msg_len;
  859. cmdsp++;
  860. }
  861. /* Type-A command to write buf0 */
  862. if (dst_p) {
  863. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  864. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  865. cmd = sba_cmd_enc(cmd, msg_len,
  866. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  867. cmd = sba_cmd_enc(cmd, 0x1,
  868. SBA_RESP_SHIFT, SBA_RESP_MASK);
  869. c_mdata = sba_cmd_write_c_mdata(0);
  870. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  871. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  872. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  873. SBA_CMD_SHIFT, SBA_CMD_MASK);
  874. cmdsp->cmd = cmd;
  875. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  876. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  877. if (req->sba->hw_resp_size) {
  878. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  879. cmdsp->resp = resp_dma;
  880. cmdsp->resp_len = req->sba->hw_resp_size;
  881. }
  882. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  883. cmdsp->data = *dst_p + msg_offset;
  884. cmdsp->data_len = msg_len;
  885. cmdsp++;
  886. }
  887. /* Type-A command to write buf1 */
  888. if (dst_q) {
  889. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  890. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  891. cmd = sba_cmd_enc(cmd, msg_len,
  892. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  893. cmd = sba_cmd_enc(cmd, 0x1,
  894. SBA_RESP_SHIFT, SBA_RESP_MASK);
  895. c_mdata = sba_cmd_write_c_mdata(1);
  896. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  897. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  898. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  899. SBA_CMD_SHIFT, SBA_CMD_MASK);
  900. cmdsp->cmd = cmd;
  901. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  902. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  903. if (req->sba->hw_resp_size) {
  904. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  905. cmdsp->resp = resp_dma;
  906. cmdsp->resp_len = req->sba->hw_resp_size;
  907. }
  908. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  909. cmdsp->data = *dst_q + msg_offset;
  910. cmdsp->data_len = msg_len;
  911. cmdsp++;
  912. }
  913. /* Fillup brcm_message */
  914. msg->type = BRCM_MESSAGE_SBA;
  915. msg->sba.cmds = cmds;
  916. msg->sba.cmds_count = cmdsp - cmds;
  917. msg->ctx = req;
  918. msg->error = 0;
  919. }
  920. static struct sba_request *
  921. sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
  922. dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
  923. u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
  924. {
  925. struct sba_request *req = NULL;
  926. /* Alloc new request */
  927. req = sba_alloc_request(sba);
  928. if (!req)
  929. return NULL;
  930. if (flags & DMA_PREP_FENCE)
  931. req->flags |= SBA_REQUEST_FENCE;
  932. /* Fillup request messages */
  933. sba_fillup_pq_msg(req, dmaf_continue(flags),
  934. req->cmds, &req->msg,
  935. off, len, dst_p, dst_q, scf, src, src_cnt);
  936. /* Init async_tx descriptor */
  937. req->tx.flags = flags;
  938. req->tx.cookie = -EBUSY;
  939. return req;
  940. }
  941. static void sba_fillup_pq_single_msg(struct sba_request *req,
  942. bool pq_continue,
  943. struct brcm_sba_command *cmds,
  944. struct brcm_message *msg,
  945. dma_addr_t msg_offset, size_t msg_len,
  946. dma_addr_t *dst_p, dma_addr_t *dst_q,
  947. dma_addr_t src, u8 scf)
  948. {
  949. u64 cmd;
  950. u32 c_mdata;
  951. u8 pos, dpos = raid6_gflog[scf];
  952. dma_addr_t resp_dma = req->tx.phys;
  953. struct brcm_sba_command *cmdsp = cmds;
  954. if (!dst_p)
  955. goto skip_p;
  956. if (pq_continue) {
  957. /* Type-B command to load old P into buf0 */
  958. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  959. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  960. cmd = sba_cmd_enc(cmd, msg_len,
  961. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  962. c_mdata = sba_cmd_load_c_mdata(0);
  963. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  964. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  965. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  966. SBA_CMD_SHIFT, SBA_CMD_MASK);
  967. cmdsp->cmd = cmd;
  968. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  969. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  970. cmdsp->data = *dst_p + msg_offset;
  971. cmdsp->data_len = msg_len;
  972. cmdsp++;
  973. /*
  974. * Type-B commands to xor data with buf0 and put it
  975. * back in buf0
  976. */
  977. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  978. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  979. cmd = sba_cmd_enc(cmd, msg_len,
  980. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  981. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  982. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  983. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  984. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  985. SBA_CMD_SHIFT, SBA_CMD_MASK);
  986. cmdsp->cmd = cmd;
  987. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  988. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  989. cmdsp->data = src + msg_offset;
  990. cmdsp->data_len = msg_len;
  991. cmdsp++;
  992. } else {
  993. /* Type-B command to load old P into buf0 */
  994. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  995. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  996. cmd = sba_cmd_enc(cmd, msg_len,
  997. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  998. c_mdata = sba_cmd_load_c_mdata(0);
  999. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1000. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1001. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  1002. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1003. cmdsp->cmd = cmd;
  1004. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1005. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1006. cmdsp->data = src + msg_offset;
  1007. cmdsp->data_len = msg_len;
  1008. cmdsp++;
  1009. }
  1010. /* Type-A command to write buf0 */
  1011. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1012. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1013. cmd = sba_cmd_enc(cmd, msg_len,
  1014. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1015. cmd = sba_cmd_enc(cmd, 0x1,
  1016. SBA_RESP_SHIFT, SBA_RESP_MASK);
  1017. c_mdata = sba_cmd_write_c_mdata(0);
  1018. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1019. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1020. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  1021. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1022. cmdsp->cmd = cmd;
  1023. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1024. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1025. if (req->sba->hw_resp_size) {
  1026. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  1027. cmdsp->resp = resp_dma;
  1028. cmdsp->resp_len = req->sba->hw_resp_size;
  1029. }
  1030. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  1031. cmdsp->data = *dst_p + msg_offset;
  1032. cmdsp->data_len = msg_len;
  1033. cmdsp++;
  1034. skip_p:
  1035. if (!dst_q)
  1036. goto skip_q;
  1037. /* Type-A command to zero all buffers */
  1038. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1039. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1040. cmd = sba_cmd_enc(cmd, msg_len,
  1041. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1042. cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
  1043. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1044. cmdsp->cmd = cmd;
  1045. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1046. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1047. cmdsp++;
  1048. if (dpos == 255)
  1049. goto skip_q_computation;
  1050. pos = (dpos < req->sba->max_pq_coefs) ?
  1051. dpos : (req->sba->max_pq_coefs - 1);
  1052. /*
  1053. * Type-B command to generate initial Q from data
  1054. * and store output into buf0
  1055. */
  1056. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  1057. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1058. cmd = sba_cmd_enc(cmd, msg_len,
  1059. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1060. c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
  1061. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1062. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1063. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  1064. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  1065. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
  1066. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1067. cmdsp->cmd = cmd;
  1068. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1069. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1070. cmdsp->data = src + msg_offset;
  1071. cmdsp->data_len = msg_len;
  1072. cmdsp++;
  1073. dpos -= pos;
  1074. /* Multiple Type-A commands to generate final Q */
  1075. while (dpos) {
  1076. pos = (dpos < req->sba->max_pq_coefs) ?
  1077. dpos : (req->sba->max_pq_coefs - 1);
  1078. /*
  1079. * Type-A command to generate Q with buf0 and
  1080. * buf1, storing the result in buf0
  1081. */
  1082. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1083. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1084. cmd = sba_cmd_enc(cmd, msg_len,
  1085. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1086. c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
  1087. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1088. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1089. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  1090. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  1091. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
  1092. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1093. cmdsp->cmd = cmd;
  1094. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1095. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1096. cmdsp++;
  1097. dpos -= pos;
  1098. }
  1099. skip_q_computation:
  1100. if (pq_continue) {
  1101. /*
  1102. * Type-B command to XOR previous output with
  1103. * buf0 and write it into buf0
  1104. */
  1105. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  1106. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1107. cmd = sba_cmd_enc(cmd, msg_len,
  1108. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1109. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  1110. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1111. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1112. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  1113. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1114. cmdsp->cmd = cmd;
  1115. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1116. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1117. cmdsp->data = *dst_q + msg_offset;
  1118. cmdsp->data_len = msg_len;
  1119. cmdsp++;
  1120. }
  1121. /* Type-A command to write buf0 */
  1122. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1123. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1124. cmd = sba_cmd_enc(cmd, msg_len,
  1125. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1126. cmd = sba_cmd_enc(cmd, 0x1,
  1127. SBA_RESP_SHIFT, SBA_RESP_MASK);
  1128. c_mdata = sba_cmd_write_c_mdata(0);
  1129. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1130. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1131. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  1132. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1133. cmdsp->cmd = cmd;
  1134. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1135. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1136. if (req->sba->hw_resp_size) {
  1137. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  1138. cmdsp->resp = resp_dma;
  1139. cmdsp->resp_len = req->sba->hw_resp_size;
  1140. }
  1141. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  1142. cmdsp->data = *dst_q + msg_offset;
  1143. cmdsp->data_len = msg_len;
  1144. cmdsp++;
  1145. skip_q:
  1146. /* Fillup brcm_message */
  1147. msg->type = BRCM_MESSAGE_SBA;
  1148. msg->sba.cmds = cmds;
  1149. msg->sba.cmds_count = cmdsp - cmds;
  1150. msg->ctx = req;
  1151. msg->error = 0;
  1152. }
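/*
 * Note on the Q computation above: the Galois-field coefficient is applied
 * as g^dpos with dpos = raid6_gflog[scf], split into chunks of at most
 * (max_pq_coefs - 1) per GALOIS command. For example, with max_pq_coefs = 6
 * and dpos = 11, the hardware multiplies by g^5, g^5 and then g^1 across
 * three GALOIS commands, i.e. g^11 in total.
 */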
  1153. static struct sba_request *
  1154. sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
  1155. dma_addr_t *dst_p, dma_addr_t *dst_q,
  1156. dma_addr_t src, u8 scf, size_t len,
  1157. unsigned long flags)
  1158. {
  1159. struct sba_request *req = NULL;
  1160. /* Alloc new request */
  1161. req = sba_alloc_request(sba);
  1162. if (!req)
  1163. return NULL;
  1164. if (flags & DMA_PREP_FENCE)
  1165. req->flags |= SBA_REQUEST_FENCE;
  1166. /* Fillup request messages */
  1167. sba_fillup_pq_single_msg(req, dmaf_continue(flags),
  1168. req->cmds, &req->msg, off, len,
  1169. dst_p, dst_q, src, scf);
  1170. /* Init async_tx descriptor */
  1171. req->tx.flags = flags;
  1172. req->tx.cookie = -EBUSY;
  1173. return req;
  1174. }
  1175. static struct dma_async_tx_descriptor *
  1176. sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
  1177. u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
  1178. {
  1179. u32 i, dst_q_index;
  1180. size_t req_len;
  1181. bool slow = false;
  1182. dma_addr_t off = 0;
  1183. dma_addr_t *dst_p = NULL, *dst_q = NULL;
  1184. struct sba_device *sba = to_sba_device(dchan);
  1185. struct sba_request *first = NULL, *req;
  1186. /* Sanity checks */
  1187. if (unlikely(src_cnt > sba->max_pq_srcs))
  1188. return NULL;
  1189. for (i = 0; i < src_cnt; i++)
  1190. if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
  1191. slow = true;
  1192. /* Figure out P and Q destination addresses */
  1193. if (!(flags & DMA_PREP_PQ_DISABLE_P))
  1194. dst_p = &dst[0];
  1195. if (!(flags & DMA_PREP_PQ_DISABLE_Q))
  1196. dst_q = &dst[1];
  1197. /* Create chained requests where each request is up to hw_buf_size */
  1198. while (len) {
  1199. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  1200. if (slow) {
  1201. dst_q_index = src_cnt;
  1202. if (dst_q) {
  1203. for (i = 0; i < src_cnt; i++) {
  1204. if (*dst_q == src[i]) {
  1205. dst_q_index = i;
  1206. break;
  1207. }
  1208. }
  1209. }
  1210. if (dst_q_index < src_cnt) {
  1211. i = dst_q_index;
  1212. req = sba_prep_dma_pq_single_req(sba,
  1213. off, dst_p, dst_q, src[i], scf[i],
  1214. req_len, flags | DMA_PREP_FENCE);
  1215. if (!req)
  1216. goto fail;
  1217. if (first)
  1218. sba_chain_request(first, req);
  1219. else
  1220. first = req;
  1221. flags |= DMA_PREP_CONTINUE;
  1222. }
  1223. for (i = 0; i < src_cnt; i++) {
  1224. if (dst_q_index == i)
  1225. continue;
  1226. req = sba_prep_dma_pq_single_req(sba,
  1227. off, dst_p, dst_q, src[i], scf[i],
  1228. req_len, flags | DMA_PREP_FENCE);
  1229. if (!req)
  1230. goto fail;
  1231. if (first)
  1232. sba_chain_request(first, req);
  1233. else
  1234. first = req;
  1235. flags |= DMA_PREP_CONTINUE;
  1236. }
  1237. } else {
  1238. req = sba_prep_dma_pq_req(sba, off,
  1239. dst_p, dst_q, src, src_cnt,
  1240. scf, req_len, flags);
  1241. if (!req)
  1242. goto fail;
  1243. if (first)
  1244. sba_chain_request(first, req);
  1245. else
  1246. first = req;
  1247. }
  1248. off += req_len;
  1249. len -= req_len;
  1250. }
  1251. return (first) ? &first->tx : NULL;
  1252. fail:
  1253. if (first)
  1254. sba_free_chained_requests(first);
  1255. return NULL;
  1256. }
  1257. /* ====== Mailbox callbacks ===== */
  1258. static void sba_receive_message(struct mbox_client *cl, void *msg)
  1259. {
  1260. struct brcm_message *m = msg;
  1261. struct sba_request *req = m->ctx;
  1262. struct sba_device *sba = req->sba;
  1263. /* Report error if message has error */
  1264. if (m->error < 0)
  1265. dev_err(sba->dev, "%s got message with error %d",
  1266. dma_chan_name(&sba->dma_chan), m->error);
  1267. /* Process received request */
  1268. sba_process_received_request(sba, req);
  1269. }
  1270. /* ====== Debugfs callbacks ====== */
  1271. static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
  1272. {
  1273. struct sba_device *sba = dev_get_drvdata(file->private);
  1274. /* Write stats in file */
  1275. sba_write_stats_in_seqfile(sba, file);
  1276. return 0;
  1277. }
  1278. /* ====== Platform driver routines ===== */
  1279. static int sba_prealloc_channel_resources(struct sba_device *sba)
  1280. {
  1281. int i, j, ret = 0;
  1282. struct sba_request *req = NULL;
  1283. sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
  1284. sba->max_resp_pool_size,
  1285. &sba->resp_dma_base, GFP_KERNEL);
  1286. if (!sba->resp_base)
  1287. return -ENOMEM;
  1288. sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
  1289. sba->max_cmds_pool_size,
  1290. &sba->cmds_dma_base, GFP_KERNEL);
  1291. if (!sba->cmds_base) {
  1292. ret = -ENOMEM;
  1293. goto fail_free_resp_pool;
  1294. }
  1295. spin_lock_init(&sba->reqs_lock);
  1296. sba->reqs_fence = false;
  1297. INIT_LIST_HEAD(&sba->reqs_alloc_list);
  1298. INIT_LIST_HEAD(&sba->reqs_pending_list);
  1299. INIT_LIST_HEAD(&sba->reqs_active_list);
  1300. INIT_LIST_HEAD(&sba->reqs_aborted_list);
  1301. INIT_LIST_HEAD(&sba->reqs_free_list);
  1302. for (i = 0; i < sba->max_req; i++) {
  1303. req = devm_kzalloc(sba->dev,
  1304. struct_size(req, cmds, sba->max_cmd_per_req),
  1305. GFP_KERNEL);
  1306. if (!req) {
  1307. ret = -ENOMEM;
  1308. goto fail_free_cmds_pool;
  1309. }
  1310. INIT_LIST_HEAD(&req->node);
  1311. req->sba = sba;
  1312. req->flags = SBA_REQUEST_STATE_FREE;
  1313. INIT_LIST_HEAD(&req->next);
  1314. atomic_set(&req->next_pending_count, 0);
  1315. for (j = 0; j < sba->max_cmd_per_req; j++) {
  1316. req->cmds[j].cmd = 0;
  1317. req->cmds[j].cmd_dma = sba->cmds_base +
  1318. (i * sba->max_cmd_per_req + j) * sizeof(u64);
  1319. req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
  1320. (i * sba->max_cmd_per_req + j) * sizeof(u64);
  1321. req->cmds[j].flags = 0;
  1322. }
  1323. memset(&req->msg, 0, sizeof(req->msg));
  1324. dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
  1325. async_tx_ack(&req->tx);
  1326. req->tx.tx_submit = sba_tx_submit;
  1327. req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
  1328. list_add_tail(&req->node, &sba->reqs_free_list);
  1329. }
  1330. return 0;
  1331. fail_free_cmds_pool:
  1332. dma_free_coherent(sba->mbox_dev,
  1333. sba->max_cmds_pool_size,
  1334. sba->cmds_base, sba->cmds_dma_base);
  1335. fail_free_resp_pool:
  1336. dma_free_coherent(sba->mbox_dev,
  1337. sba->max_resp_pool_size,
  1338. sba->resp_base, sba->resp_dma_base);
  1339. return ret;
  1340. }
  1341. static void sba_freeup_channel_resources(struct sba_device *sba)
  1342. {
  1343. dmaengine_terminate_all(&sba->dma_chan);
  1344. dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
  1345. sba->cmds_base, sba->cmds_dma_base);
  1346. dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
  1347. sba->resp_base, sba->resp_dma_base);
  1348. sba->resp_base = NULL;
  1349. sba->resp_dma_base = 0;
  1350. }
  1351. static int sba_async_register(struct sba_device *sba)
  1352. {
  1353. int ret;
  1354. struct dma_device *dma_dev = &sba->dma_dev;
  1355. /* Initialize DMA channel cookie */
  1356. sba->dma_chan.device = dma_dev;
  1357. dma_cookie_init(&sba->dma_chan);
  1358. /* Initialize DMA device capability mask */
  1359. dma_cap_zero(dma_dev->cap_mask);
  1360. dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
  1361. dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
  1362. dma_cap_set(DMA_XOR, dma_dev->cap_mask);
  1363. dma_cap_set(DMA_PQ, dma_dev->cap_mask);
  1364. /*
  1365. * Set mailbox channel device as the base device of
  1366. * our dma_device because the actual memory accesses
  1367. * will be done by mailbox controller
  1368. */
  1369. dma_dev->dev = sba->mbox_dev;
  1370. /* Set base prep routines */
  1371. dma_dev->device_free_chan_resources = sba_free_chan_resources;
  1372. dma_dev->device_terminate_all = sba_device_terminate_all;
  1373. dma_dev->device_issue_pending = sba_issue_pending;
  1374. dma_dev->device_tx_status = sba_tx_status;
  1375. /* Set interrupt routine */
  1376. if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
  1377. dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;
  1378. /* Set memcpy routine */
  1379. if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
  1380. dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;
  1381. /* Set xor routine and capability */
  1382. if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
  1383. dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
  1384. dma_dev->max_xor = sba->max_xor_srcs;
  1385. }
  1386. /* Set pq routine and capability */
  1387. if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
  1388. dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
  1389. dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
  1390. }
  1391. /* Initialize DMA device channel list */
  1392. INIT_LIST_HEAD(&dma_dev->channels);
  1393. list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);
  1394. /* Register with Linux async DMA framework */
  1395. ret = dma_async_device_register(dma_dev);
  1396. if (ret) {
  1397. dev_err(sba->dev, "async device register error %d", ret);
  1398. return ret;
  1399. }
  1400. dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
  1401. dma_chan_name(&sba->dma_chan),
  1402. dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
  1403. dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
  1404. dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
  1405. dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");
  1406. return 0;
  1407. }
  1408. static int sba_probe(struct platform_device *pdev)
  1409. {
  1410. int ret = 0;
  1411. struct sba_device *sba;
  1412. struct platform_device *mbox_pdev;
  1413. struct of_phandle_args args;
  1414. /* Allocate main SBA struct */
  1415. sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
  1416. if (!sba)
  1417. return -ENOMEM;
  1418. sba->dev = &pdev->dev;
  1419. platform_set_drvdata(pdev, sba);
  1420. /* Number of mailbox channels should be at least 1 */
  1421. ret = of_count_phandle_with_args(pdev->dev.of_node,
  1422. "mboxes", "#mbox-cells");
  1423. if (ret <= 0)
  1424. return -ENODEV;
  1425. /* Determine SBA version from DT compatible string */
  1426. if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
  1427. sba->ver = SBA_VER_1;
  1428. else if (of_device_is_compatible(sba->dev->of_node,
  1429. "brcm,iproc-sba-v2"))
  1430. sba->ver = SBA_VER_2;
  1431. else
  1432. return -ENODEV;
  1433. /* Derived configuration parameters */
  1434. switch (sba->ver) {
  1435. case SBA_VER_1:
  1436. sba->hw_buf_size = 4096;
  1437. sba->hw_resp_size = 8;
  1438. sba->max_pq_coefs = 6;
  1439. sba->max_pq_srcs = 6;
  1440. break;
  1441. case SBA_VER_2:
  1442. sba->hw_buf_size = 4096;
  1443. sba->hw_resp_size = 8;
  1444. sba->max_pq_coefs = 30;
  1445. /*
  1446. * We could support max_pq_srcs == max_pq_coefs, but we are
  1447. * limited by the number of SBA commands that we can fit in
  1448. * one message for the underlying ring manager HW.
  1449. */
  1450. sba->max_pq_srcs = 12;
  1451. break;
  1452. default:
  1453. return -EINVAL;
  1454. }
  1455. sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
  1456. sba->max_cmd_per_req = sba->max_pq_srcs + 3;
  1457. sba->max_xor_srcs = sba->max_cmd_per_req - 1;
  1458. sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
  1459. sba->max_cmds_pool_size = sba->max_req *
  1460. sba->max_cmd_per_req * sizeof(u64);
  1461. /* Setup mailbox client */
  1462. sba->client.dev = &pdev->dev;
  1463. sba->client.rx_callback = sba_receive_message;
  1464. sba->client.tx_block = false;
  1465. sba->client.knows_txdone = true;
  1466. sba->client.tx_tout = 0;
  1467. /* Request mailbox channel */
  1468. sba->mchan = mbox_request_channel(&sba->client, 0);
  1469. if (IS_ERR(sba->mchan)) {
  1470. ret = PTR_ERR(sba->mchan);
  1471. return ret;
  1472. }
  1473. /* Find out the underlying mailbox device */
  1474. ret = of_parse_phandle_with_args(pdev->dev.of_node,
  1475. "mboxes", "#mbox-cells", 0, &args);
  1476. if (ret)
  1477. goto fail_free_mchan;
  1478. mbox_pdev = of_find_device_by_node(args.np);
  1479. of_node_put(args.np);
  1480. if (!mbox_pdev) {
  1481. ret = -ENODEV;
  1482. goto fail_free_mchan;
  1483. }
  1484. sba->mbox_dev = &mbox_pdev->dev;
  1485. /* Prealloc channel resources */
  1486. ret = sba_prealloc_channel_resources(sba);
  1487. if (ret)
  1488. goto fail_free_mchan;
  1489. /* Check availability of debugfs */
  1490. if (!debugfs_initialized())
  1491. goto skip_debugfs;
  1492. /* Create debugfs root entry */
  1493. sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
  1494. /* Create debugfs stats entry */
  1495. debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
  1496. sba_debugfs_stats_show);
  1497. skip_debugfs:
  1498. /* Register DMA device with Linux async framework */
  1499. ret = sba_async_register(sba);
  1500. if (ret)
  1501. goto fail_free_resources;
  1502. /* Print device info */
  1503. dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
  1504. dma_chan_name(&sba->dma_chan), sba->ver+1,
  1505. dev_name(sba->mbox_dev));
  1506. return 0;
  1507. fail_free_resources:
  1508. debugfs_remove_recursive(sba->root);
  1509. sba_freeup_channel_resources(sba);
  1510. fail_free_mchan:
  1511. mbox_free_channel(sba->mchan);
  1512. return ret;
  1513. }
  1514. static int sba_remove(struct platform_device *pdev)
  1515. {
  1516. struct sba_device *sba = platform_get_drvdata(pdev);
  1517. dma_async_device_unregister(&sba->dma_dev);
  1518. debugfs_remove_recursive(sba->root);
  1519. sba_freeup_channel_resources(sba);
  1520. mbox_free_channel(sba->mchan);
  1521. return 0;
  1522. }
  1523. static const struct of_device_id sba_of_match[] = {
  1524. { .compatible = "brcm,iproc-sba", },
  1525. { .compatible = "brcm,iproc-sba-v2", },
  1526. {},
  1527. };
  1528. MODULE_DEVICE_TABLE(of, sba_of_match);
  1529. static struct platform_driver sba_driver = {
  1530. .probe = sba_probe,
  1531. .remove = sba_remove,
  1532. .driver = {
  1533. .name = "bcm-sba-raid",
  1534. .of_match_table = sba_of_match,
  1535. },
  1536. };
  1537. module_platform_driver(sba_driver);
  1538. MODULE_DESCRIPTION("Broadcom SBA RAID driver");
  1539. MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
  1540. MODULE_LICENSE("GPL v2");