smc_wr.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting Infiniband API
 *
 * Work requests (WR) of type ib_post_send or ib_post_recv respectively
 * are submitted to either RC SQ or RC RQ respectively
 * (reliably connected send/receive queue)
 * and become work queue entries (WQEs).
 * While an SQ WR/WQE is pending, we track it until transmission completion.
 * Through a send or receive completion queue (CQ) respectively,
 * we get completion queue entries (CQEs) [aka work completions (WCs)].
 * Since the CQ callback is called from IRQ context, we split work by using
 * bottom halves implemented by tasklets.
 *
 * SMC uses this to exchange LLC (link layer control)
 * and CDC (connection data control) messages.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Steffen Maier <maier@linux.vnet.ibm.com>
 */

#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_wr.h"

#define SMC_WR_MAX_POLL_CQE 10	/* max. # of compl. queue elements in 1 poll */

#define SMC_WR_RX_HASH_BITS 4
static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS);
static DEFINE_SPINLOCK(smc_wr_rx_hash_lock);

struct smc_wr_tx_pend {	/* control data for a pending send request */
	u64			wr_id;		/* work request id sent */
	smc_wr_tx_handler	handler;
	enum ib_wc_status	wc_status;	/* CQE status */
	struct smc_link		*link;
	u32			idx;
	struct smc_wr_tx_pend_priv priv;
	u8			compl_requested;
};
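
/* One smc_wr_tx_pend entry exists per send buffer slot; it shares its slot
 * index with the wr_tx_bufs[], wr_tx_ibs[] and wr_tx_pends[] arrays and with
 * the wr_tx_mask bitmap on struct smc_link. A set bit in wr_tx_mask means
 * the slot is claimed and a send may be in flight for it.
 */
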
/******************************** send queue *********************************/

/*------------------------------- completion --------------------------------*/

/* returns true if at least one tx work request is pending on the given link */
static inline bool smc_wr_is_tx_pend(struct smc_link *link)
{
	if (find_first_bit(link->wr_tx_mask, link->wr_tx_cnt) !=
							link->wr_tx_cnt) {
		return true;
	}
	return false;
}

/* wait till all pending tx work requests on the given link are completed */
void smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
{
	wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link));
}

static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
{
	u32 i;

	for (i = 0; i < link->wr_tx_cnt; i++) {
		if (link->wr_tx_pends[i].wr_id == wr_id)
			return i;
	}
	return link->wr_tx_cnt;
}

static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
{
	struct smc_wr_tx_pend pnd_snd;
	struct smc_link *link;
	u32 pnd_snd_idx;

	link = wc->qp->qp_context;

	if (wc->opcode == IB_WC_REG_MR) {
		if (wc->status)
			link->wr_reg_state = FAILED;
		else
			link->wr_reg_state = CONFIRMED;
		smc_wr_wakeup_reg_wait(link);
		return;
	}

	pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
	if (pnd_snd_idx == link->wr_tx_cnt)
		return;
	link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
	if (link->wr_tx_pends[pnd_snd_idx].compl_requested)
		complete(&link->wr_tx_compl[pnd_snd_idx]);
	memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd));
	/* clear the full struct smc_wr_tx_pend including .priv */
	memset(&link->wr_tx_pends[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_pends[pnd_snd_idx]));
	memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_bufs[pnd_snd_idx]));
	if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
		return;
	if (wc->status) {
		/* terminate link */
		smcr_link_down_cond_sched(link);
	}
	if (pnd_snd.handler)
		pnd_snd.handler(&pnd_snd.priv, link, wc->status);
	wake_up(&link->wr_tx_wait);
}
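
/* The send-CQ tasklet drains the CQ in batches of SMC_WR_MAX_POLL_CQE work
 * completions. On the first pass it re-arms completion notification with
 * IB_CQ_REPORT_MISSED_EVENTS and then polls one more full round, so a
 * completion that raced with the re-arm is not lost.
 */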
static void smc_wr_tx_tasklet_fn(unsigned long data)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)data;
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int i = 0, rc;
	int polled = 0;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_send,
					 IB_CQ_NEXT_COMP |
					 IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		for (i = 0; i < rc; i++)
			smc_wr_tx_process_cqe(&wc[i]);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->send_tasklet);
}

/*---------------------------- request submission ---------------------------*/
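
/* Slot allocation scans wr_tx_mask for a clear bit and claims it with
 * test_and_set_bit(), so concurrent callers can never grab the same send
 * buffer; on contention the scan simply moves on to the next clear bit.
 */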
static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
	*idx = link->wr_tx_cnt;
	if (!smc_link_sendable(link))
		return -ENOLINK;
	for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
		if (!test_and_set_bit(*idx, link->wr_tx_mask))
			return 0;
	}
	*idx = link->wr_tx_cnt;
	return -EBUSY;
}

/**
 * smc_wr_tx_get_free_slot() - returns buffer for message assembly,
 *			and sets info for pending transmit tracking
 * @link:		Pointer to smc_link used to later send the message.
 * @handler:		Send completion handler function pointer.
 * @wr_buf:		Out value returns pointer to message buffer.
 * @wr_rdma_buf:	Out value returns pointer to rdma work request.
 * @wr_pend_priv:	Out value returns pointer serving as handler context.
 *
 * Return: 0 on success, or -errno on error.
 */
int smc_wr_tx_get_free_slot(struct smc_link *link,
			    smc_wr_tx_handler handler,
			    struct smc_wr_buf **wr_buf,
			    struct smc_rdma_wr **wr_rdma_buf,
			    struct smc_wr_tx_pend_priv **wr_pend_priv)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_wr_tx_pend *wr_pend;
	u32 idx = link->wr_tx_cnt;
	struct ib_send_wr *wr_ib;
	u64 wr_id;
	int rc;

	*wr_buf = NULL;
	*wr_pend_priv = NULL;
	if (in_softirq() || lgr->terminating) {
		rc = smc_wr_tx_get_free_slot_index(link, &idx);
		if (rc)
			return rc;
	} else {
		rc = wait_event_interruptible_timeout(
			link->wr_tx_wait,
			!smc_link_sendable(link) ||
			lgr->terminating ||
			(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
			SMC_WR_TX_WAIT_FREE_SLOT_TIME);
		if (!rc) {
			/* timeout - terminate link */
			smcr_link_down_cond_sched(link);
			return -EPIPE;
		}
		if (idx == link->wr_tx_cnt)
			return -EPIPE;
	}
	wr_id = smc_wr_tx_get_next_wr_id(link);
	wr_pend = &link->wr_tx_pends[idx];
	wr_pend->wr_id = wr_id;
	wr_pend->handler = handler;
	wr_pend->link = link;
	wr_pend->idx = idx;
	wr_ib = &link->wr_tx_ibs[idx];
	wr_ib->wr_id = wr_id;
	*wr_buf = &link->wr_tx_bufs[idx];
	if (wr_rdma_buf)
		*wr_rdma_buf = &link->wr_tx_rdmas[idx];
	*wr_pend_priv = &wr_pend->priv;
	return 0;
}
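
/* A caller (for instance the CDC or LLC message code) typically pairs the
 * functions above and below roughly like this:
 *
 *	struct smc_wr_tx_pend_priv *pend;
 *	struct smc_wr_buf *wr_buf;
 *	int rc;
 *
 *	rc = smc_wr_tx_get_free_slot(link, handler, &wr_buf, NULL, &pend);
 *	if (rc)
 *		return rc;
 *	... assemble the message in *wr_buf ...
 *	rc = smc_wr_tx_send(link, pend);
 *
 * If assembly fails before posting, smc_wr_tx_put_slot() releases the slot
 * again instead of sending.
 */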
int smc_wr_tx_put_slot(struct smc_link *link,
		       struct smc_wr_tx_pend_priv *wr_pend_priv)
{
	struct smc_wr_tx_pend *pend;

	pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
	if (pend->idx < link->wr_tx_cnt) {
		u32 idx = pend->idx;

		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(&link->wr_tx_pends[idx], 0,
		       sizeof(link->wr_tx_pends[idx]));
		memset(&link->wr_tx_bufs[idx], 0,
		       sizeof(link->wr_tx_bufs[idx]));
		test_and_clear_bit(idx, link->wr_tx_mask);
		wake_up(&link->wr_tx_wait);
		return 1;
	}
	return 0;
}

/* Send prepared WR slot via ib_post_send.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
{
	struct smc_wr_tx_pend *pend;
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL);
	if (rc) {
		smc_wr_tx_put_slot(link, priv);
		smcr_link_down_cond_sched(link);
	}
	return rc;
}

/* Send prepared WR slot via ib_post_send and wait for send completion
 * notification.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
			unsigned long timeout)
{
	struct smc_wr_tx_pend *pend;
	u32 pnd_idx;
	int rc;

	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	pend->compl_requested = 1;
	pnd_idx = pend->idx;
	init_completion(&link->wr_tx_compl[pnd_idx]);

	rc = smc_wr_tx_send(link, priv);
	if (rc)
		return rc;
	/* wait for completion by smc_wr_tx_process_cqe() */
	rc = wait_for_completion_interruptible_timeout(
					&link->wr_tx_compl[pnd_idx], timeout);
	if (rc <= 0)
		rc = -ENODATA;
	if (rc > 0)
		rc = 0;
	return rc;
}

/* Register a memory region and wait for result. */
int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
{
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	link->wr_reg_state = POSTED;
	link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;
	link->wr_reg.mr = mr;
	link->wr_reg.key = mr->rkey;
	rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, NULL);
	if (rc)
		return rc;

	atomic_inc(&link->wr_reg_refcnt);
	rc = wait_event_interruptible_timeout(link->wr_reg_wait,
					      (link->wr_reg_state != POSTED),
					      SMC_WR_REG_MR_WAIT_TIME);
	if (atomic_dec_and_test(&link->wr_reg_refcnt))
		wake_up_all(&link->wr_reg_wait);
	if (!rc) {
		/* timeout - terminate link */
		smcr_link_down_cond_sched(link);
		return -EPIPE;
	}
	if (rc == -ERESTARTSYS)
		return -EINTR;
	switch (link->wr_reg_state) {
	case CONFIRMED:
		rc = 0;
		break;
	case FAILED:
		rc = -EIO;
		break;
	case POSTED:
		rc = -EPIPE;
		break;
	}
	return rc;
}

/****************************** receive queue ********************************/
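
/* Receive handlers are registered once during initialization (the LLC and
 * CDC layers each register a handler for their message type) and are then
 * looked up by the type field at the start of every received message
 * (struct smc_wr_rx_hdr), see smc_wr_rx_demultiplex() below.
 */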
int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
{
	struct smc_wr_rx_handler *h_iter;
	int rc = 0;

	spin_lock(&smc_wr_rx_hash_lock);
	hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) {
		if (h_iter->type == handler->type) {
			rc = -EEXIST;
			goto out_unlock;
		}
	}
	hash_add(smc_wr_rx_hash, &handler->list, handler->type);
out_unlock:
	spin_unlock(&smc_wr_rx_hash_lock);
	return rc;
}

/* Demultiplex a received work request based on the message type to its handler.
 * Relies on smc_wr_rx_hash having been completely filled before any IB WRs,
 * and not being modified any more afterwards so we don't need to lock it.
 */
static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_wr_rx_handler *handler;
	struct smc_wr_rx_hdr *wr_rx;
	u64 temp_wr_id;
	u32 index;

	if (wc->byte_len < sizeof(*wr_rx))
		return; /* short message */
	temp_wr_id = wc->wr_id;
	index = do_div(temp_wr_id, link->wr_rx_cnt);
	wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index];
	hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) {
		if (handler->type == wr_rx->type)
			handler->handler(wc, wr_rx);
	}
}

static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
{
	struct smc_link *link;
	int i;

	for (i = 0; i < num; i++) {
		link = wc[i].qp->qp_context;
		if (wc[i].status == IB_WC_SUCCESS) {
			link->wr_rx_tstamp = jiffies;
			smc_wr_rx_demultiplex(&wc[i]);
			smc_wr_rx_post(link); /* refill WR RX */
		} else {
			/* handle status errors */
			switch (wc[i].status) {
			case IB_WC_RETRY_EXC_ERR:
			case IB_WC_RNR_RETRY_EXC_ERR:
			case IB_WC_WR_FLUSH_ERR:
				smcr_link_down_cond_sched(link);
				break;
			default:
				smc_wr_rx_post(link); /* refill WR RX */
				break;
			}
		}
	}
}

static void smc_wr_rx_tasklet_fn(unsigned long data)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)data;
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int polled = 0;
	int rc;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_recv,
					 IB_CQ_SOLICITED_MASK
					 | IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		smc_wr_rx_process_cqes(&wc[0], rc);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->recv_tasklet);
}
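
/* Post one receive work request per receive buffer so that the peer can
 * start sending as soon as the link is up; each completed receive is
 * re-posted from smc_wr_rx_process_cqes().
 */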
int smc_wr_rx_post_init(struct smc_link *link)
{
	u32 i;
	int rc = 0;

	for (i = 0; i < link->wr_rx_cnt; i++)
		rc = smc_wr_rx_post(link);
	return rc;
}

/***************************** init, exit, misc ******************************/

void smc_wr_remember_qp_attr(struct smc_link *lnk)
{
	struct ib_qp_attr *attr = &lnk->qp_attr;
	struct ib_qp_init_attr init_attr;

	memset(attr, 0, sizeof(*attr));
	memset(&init_attr, 0, sizeof(init_attr));

	ib_query_qp(lnk->roce_qp, attr,
		    IB_QP_STATE |
		    IB_QP_CUR_STATE |
		    IB_QP_PKEY_INDEX |
		    IB_QP_PORT |
		    IB_QP_QKEY |
		    IB_QP_AV |
		    IB_QP_PATH_MTU |
		    IB_QP_TIMEOUT |
		    IB_QP_RETRY_CNT |
		    IB_QP_RNR_RETRY |
		    IB_QP_RQ_PSN |
		    IB_QP_ALT_PATH |
		    IB_QP_MIN_RNR_TIMER |
		    IB_QP_SQ_PSN |
		    IB_QP_PATH_MIG_STATE |
		    IB_QP_CAP |
		    IB_QP_DEST_QPN,
		    &init_attr);

	lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT,
			       lnk->qp_attr.cap.max_send_wr);
	lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3,
			       lnk->qp_attr.cap.max_recv_wr);
}

static void smc_wr_init_sge(struct smc_link *lnk)
{
	u32 i;

	for (i = 0; i < lnk->wr_tx_cnt; i++) {
		lnk->wr_tx_sges[i].addr =
			lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
		lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_ibs[i].next = NULL;
		lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
		lnk->wr_tx_ibs[i].num_sge = 1;
		lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
		lnk->wr_tx_ibs[i].send_flags =
			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
	}
	for (i = 0; i < lnk->wr_rx_cnt; i++) {
		lnk->wr_rx_sges[i].addr =
			lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_rx_ibs[i].next = NULL;
		lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i];
		lnk->wr_rx_ibs[i].num_sge = 1;
	}
	lnk->wr_reg.wr.next = NULL;
	lnk->wr_reg.wr.num_sge = 0;
	lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED;
	lnk->wr_reg.wr.opcode = IB_WR_REG_MR;
	lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
}
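
/* Link teardown: wake up anyone waiting for a memory registration or a free
 * send slot, wait until all pending sends have completed and the reference
 * counts have dropped to zero, and only then unmap the DMA buffers.
 */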
void smc_wr_free_link(struct smc_link *lnk)
{
	struct ib_device *ibdev;

	if (!lnk->smcibdev)
		return;
	ibdev = lnk->smcibdev->ibdev;

	smc_wr_wakeup_reg_wait(lnk);
	smc_wr_wakeup_tx_wait(lnk);

	smc_wr_tx_wait_no_pending_sends(lnk);
	wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
	wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));

	if (lnk->wr_rx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_dma_addr = 0;
	}
	if (lnk->wr_tx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
				    DMA_TO_DEVICE);
		lnk->wr_tx_dma_addr = 0;
	}
}

void smc_wr_free_link_mem(struct smc_link *lnk)
{
	kfree(lnk->wr_tx_compl);
	lnk->wr_tx_compl = NULL;
	kfree(lnk->wr_tx_pends);
	lnk->wr_tx_pends = NULL;
	kfree(lnk->wr_tx_mask);
	lnk->wr_tx_mask = NULL;
	kfree(lnk->wr_tx_sges);
	lnk->wr_tx_sges = NULL;
	kfree(lnk->wr_tx_rdma_sges);
	lnk->wr_tx_rdma_sges = NULL;
	kfree(lnk->wr_rx_sges);
	lnk->wr_rx_sges = NULL;
	kfree(lnk->wr_tx_rdmas);
	lnk->wr_tx_rdmas = NULL;
	kfree(lnk->wr_rx_ibs);
	lnk->wr_rx_ibs = NULL;
	kfree(lnk->wr_tx_ibs);
	lnk->wr_tx_ibs = NULL;
	kfree(lnk->wr_tx_bufs);
	lnk->wr_tx_bufs = NULL;
	kfree(lnk->wr_rx_bufs);
	lnk->wr_rx_bufs = NULL;
}
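
/* All per-link arrays are sized from SMC_WR_BUF_CNT: one set of send buffers
 * and three times as many receive buffers, matching the limits applied in
 * smc_wr_remember_qp_attr(). Allocation failures unwind in reverse order.
 */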
int smc_wr_alloc_link_mem(struct smc_link *link)
{
	/* allocate link related memory */
	link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
	if (!link->wr_tx_bufs)
		goto no_mem;
	link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE,
				   GFP_KERNEL);
	if (!link->wr_rx_bufs)
		goto no_mem_wr_tx_bufs;
	link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_tx_ibs)
		goto no_mem_wr_rx_bufs;
	link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3,
				  sizeof(link->wr_rx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_rx_ibs)
		goto no_mem_wr_tx_ibs;
	link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_rdmas[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_rdmas)
		goto no_mem_wr_rx_ibs;
	link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
					sizeof(link->wr_tx_rdma_sges[0]),
					GFP_KERNEL);
	if (!link->wr_tx_rdma_sges)
		goto no_mem_wr_tx_rdmas;
	link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_tx_sges)
		goto no_mem_wr_tx_rdma_sges;
	link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
				   sizeof(link->wr_rx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_rx_sges)
		goto no_mem_wr_tx_sges;
	link->wr_tx_mask = kcalloc(BITS_TO_LONGS(SMC_WR_BUF_CNT),
				   sizeof(*link->wr_tx_mask),
				   GFP_KERNEL);
	if (!link->wr_tx_mask)
		goto no_mem_wr_rx_sges;
	link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_pends[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_pends)
		goto no_mem_wr_tx_mask;
	link->wr_tx_compl = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_compl[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_compl)
		goto no_mem_wr_tx_pends;
	return 0;

no_mem_wr_tx_pends:
	kfree(link->wr_tx_pends);
no_mem_wr_tx_mask:
	kfree(link->wr_tx_mask);
no_mem_wr_rx_sges:
	kfree(link->wr_rx_sges);
no_mem_wr_tx_sges:
	kfree(link->wr_tx_sges);
no_mem_wr_tx_rdma_sges:
	kfree(link->wr_tx_rdma_sges);
no_mem_wr_tx_rdmas:
	kfree(link->wr_tx_rdmas);
no_mem_wr_rx_ibs:
	kfree(link->wr_rx_ibs);
no_mem_wr_tx_ibs:
	kfree(link->wr_tx_ibs);
no_mem_wr_rx_bufs:
	kfree(link->wr_rx_bufs);
no_mem_wr_tx_bufs:
	kfree(link->wr_tx_bufs);
no_mem:
	return -ENOMEM;
}

void smc_wr_remove_dev(struct smc_ib_device *smcibdev)
{
	tasklet_kill(&smcibdev->recv_tasklet);
	tasklet_kill(&smcibdev->send_tasklet);
}

void smc_wr_add_dev(struct smc_ib_device *smcibdev)
{
	tasklet_init(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn,
		     (unsigned long)smcibdev);
	tasklet_init(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn,
		     (unsigned long)smcibdev);
}
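
/* Per-link setup: map the previously allocated send and receive buffer
 * arrays for DMA, initialize the scatter/gather elements and work request
 * templates, and reset the bookkeeping (wr_tx_mask, wait queues, refcounts).
 */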
int smc_wr_create_link(struct smc_link *lnk)
{
	struct ib_device *ibdev = lnk->smcibdev->ibdev;
	int rc = 0;

	smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0);
	lnk->wr_rx_id = 0;
	lnk->wr_rx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
		DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) {
		lnk->wr_rx_dma_addr = 0;
		rc = -EIO;
		goto out;
	}
	lnk->wr_tx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
		DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) {
		rc = -EIO;
		goto dma_unmap;
	}
	smc_wr_init_sge(lnk);
	memset(lnk->wr_tx_mask, 0,
	       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
	init_waitqueue_head(&lnk->wr_tx_wait);
	atomic_set(&lnk->wr_tx_refcnt, 0);
	init_waitqueue_head(&lnk->wr_reg_wait);
	atomic_set(&lnk->wr_reg_refcnt, 0);
	return rc;

dma_unmap:
	ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
			    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
			    DMA_FROM_DEVICE);
	lnk->wr_rx_dma_addr = 0;
out:
	return rc;
}