// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */
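
/* As a rough illustration (added commentary, not part of the upstream
 * source), the Work Request traffic for one RPC that registers a single
 * chunk looks like:
 *
 *	FAST_REG(rkey R) -> SEND(Call referencing R)	[one ib_post_send]
 *	... peer accesses the region via R, then replies ...
 *	LOCAL_INV(R)					[fences R]
 *
 * frwr_send() below builds and posts the first chain; frwr_unmap_async()
 * or frwr_unmap_sync() builds and posts the LOCAL_INV chain.
 */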

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing
 * the connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_mr_init
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

static void frwr_mr_recycle(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_lock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_lock);

	frwr_release_mr(mr);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		rpcrdma_mr_put(mr);
}

/**
 * frwr_mr_init - Initialize one MR
 * @r_xprt: controlling transport instance
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int depth = ep->re_max_fr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kmalloc_array(depth, sizeof(*sg), GFP_NOFS);
	if (!sg)
		goto out_list_err;

	mr->mr_xprt = r_xprt;
	mr->frwr.fr_mr = frmr;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @ep: endpoint to fill in
 * @device: RDMA device to query
 *
 * On success, sets:
 *	ep->re_attr
 *	ep->re_max_requests
 *	ep->re_max_rdma_segs
 *	ep->re_max_fr_depth
 *	ep->re_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
	const struct ib_device_attr *attrs = &device->attrs;
	int max_qp_wr, depth, delta;
	unsigned int max_sge;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
	    attrs->max_fast_reg_page_list_len == 0) {
		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
		       device->name);
		return -EINVAL;
	}

	max_sge = min_t(unsigned int, attrs->max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ep->re_attr.cap.max_send_sge = max_sge;
	ep->re_attr.cap.max_recv_sge = 1;

	ep->re_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ep->re_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_fr_depth = attrs->max_sge_rd;
	else
		ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
	if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
		ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ep->re_max_fr_depth;
		} while (delta > 0);
	}
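
	/* Worked example (added commentary, illustrative numbers only):
	 * if RPCRDMA_MAX_DATA_SEGS were 256 and the device's FRWR depth
	 * were 16, the loop above would run ceil((256 - 16) / 16) = 15
	 * times, adding 30 WRs and leaving depth = 7 + 30 = 37 Send
	 * Queue slots budgeted per in-flight RPC.
	 */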

	max_qp_wr = attrs->max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->re_max_requests > max_qp_wr)
		ep->re_max_requests = max_qp_wr;
	ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
		ep->re_max_requests = max_qp_wr / depth;
		if (!ep->re_max_requests)
			return -ENOMEM;
		ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	}
	ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ep->re_max_rdma_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ep->re_max_rdma_segs += 2;
	if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;

	/* Ensure the underlying device is capable of conveying the
	 * largest r/wsize NFS will ask for. This guarantees that
	 * failing over from one RDMA device to another will not
	 * break NFS I/O.
	 */
	if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
		return -ENOMEM;

	return 0;
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_reg_wr *reg_wr;
	int i, n, dma_nents;
	struct ib_mr *ibmr;
	u8 key;

	if (nsegs > ep->re_max_fr_depth)
		nsegs = ep->re_max_fr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
	mr->mr_nents = i;

	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
				  mr->mr_dir);
	if (!dma_nents)
		goto out_dmamap_err;

	ibmr = mr->frwr.fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
	if (n != dma_nents)
		goto out_mapmr_err;
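
	/* Note (added commentary, not in the upstream source): the upper
	 * 32 bits of the MR's iova are overwritten with the RPC's XID so
	 * that traces and wire captures can more easily be matched to the
	 * RPC that owns this registration, and the low-order byte of the
	 * rkey is bumped so that each registration of this MR presents a
	 * fresh key value.
	 */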
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->frwr.fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	mr->mr_dir = DMA_NONE;
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}
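
/* A minimal sketch of how a caller might drive frwr_map (added
 * commentary; the real chunk-encoding logic lives in rpc_rdma.c and the
 * surrounding error handling here is approximate):
 *
 *	while (nsegs) {
 *		mr = rpcrdma_mr_get(r_xprt);
 *		if (!mr)
 *			return -EAGAIN;
 *		seg = frwr_map(r_xprt, seg, nsegs, writing, rqst->rq_xid, mr);
 *		if (IS_ERR(seg))
 *			return PTR_ERR(seg);
 *		list_add(&mr->mr_list, &req->rl_registered);
 *		nsegs -= mr->mr_nents;
 *	}
 *
 * Each call consumes up to ep->re_max_fr_depth segments and returns a
 * pointer to the first segment it did not register.
 */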

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue
 * @wc: WCE for a completed FastReg WR
 *
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, frwr);
	/* The MR will get recycled when the associated req is retransmitted */

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_send - post Send WRs containing the RPC Call message
 * @r_xprt: controlling transport instance
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the return code from ib_post_send.
 *
 * Caller must hold the transport send lock to ensure that the
 * pointers to the transport's rdma_cm_id and QP are stable.
 */
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}
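
	/* At this point (added commentary) the chain being posted is:
	 *
	 *	REG_MR(last MR) -> ... -> REG_MR(first MR) -> Send WR
	 *
	 * so a single ib_post_send hands all registrations plus the RPC
	 * Call to the HCA, with the Send ordered after the registrations.
	 */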
	return ib_post_send(r_xprt->rx_ep->re_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_reminv(mr);
			rpcrdma_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (wc->status != IB_WC_SUCCESS)
		frwr_mr_recycle(mr);
	else
		rpcrdma_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, frwr);
	__frwr_release_mr(wc, mr);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, frwr);
	__frwr_release_mr(wc, mr);
	complete(&frwr->fr_linv_done);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		frwr_mr_recycle(mr);
	}
}
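
/* Note (added commentary): frwr_unmap_async below builds the same
 * LOCAL_INV chain, but instead of sleeping here it completes the RPC
 * from the final LOCAL_INV completion handler, frwr_wc_localinv_done.
 */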

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
	struct rpcrdma_rep *rep = mr->mr_req->rl_reply;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, frwr);
	__frwr_release_mr(wc, mr);

	/* Ensure @rep is generated before __frwr_release_mr */
	smp_rmb();
	rpcrdma_complete_rqst(rep);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		frwr_mr_recycle(mr);
	}

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so wake here in that case.
	 */
	rpcrdma_complete_rqst(req->rl_reply);
}