svc_rdma_rw.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <rdma/rw.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
        struct list_head rw_list;
        struct rdma_rw_ctx rw_ctx;
        unsigned int rw_nents;
        struct sg_table rw_sg_table;
        struct scatterlist rw_first_sgl[];
};

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
        return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
                                        rw_list);
}

static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
        struct svc_rdma_rw_ctxt *ctxt;

        spin_lock(&rdma->sc_rw_ctxt_lock);

        ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
        if (ctxt) {
                list_del(&ctxt->rw_list);
                spin_unlock(&rdma->sc_rw_ctxt_lock);
        } else {
                spin_unlock(&rdma->sc_rw_ctxt_lock);
                ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
                               GFP_KERNEL);
                if (!ctxt)
                        goto out_noctx;
                INIT_LIST_HEAD(&ctxt->rw_list);
        }

        ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
        if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
                                   ctxt->rw_sg_table.sgl,
                                   SG_CHUNK_SIZE))
                goto out_free;
        return ctxt;

out_free:
        kfree(ctxt);
out_noctx:
        trace_svcrdma_no_rwctx_err(rdma, sges);
        return NULL;
}

static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
                                 struct svc_rdma_rw_ctxt *ctxt)
{
        sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);

        spin_lock(&rdma->sc_rw_ctxt_lock);
        list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
        spin_unlock(&rdma->sc_rw_ctxt_lock);
}
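
/* Illustrative, self-contained sketch (not kernel code) of the
 * allocate-on-demand, cache-on-put pattern described above for
 * svc_rdma_rw_ctxt: a get either pops a cached object under a lock
 * or falls back to a fresh allocation, and a put returns the object
 * to the cache for reuse. All names here (obj_cache, obj_get,
 * obj_put) are hypothetical, and a userspace mutex stands in for the
 * kernel spinlock.
 */
#include <pthread.h>
#include <stdlib.h>

struct obj {
        struct obj *next;
        /* payload would follow here */
};

struct obj_cache {
        pthread_mutex_t lock;
        struct obj *free_list;
};

static struct obj *obj_get(struct obj_cache *cache)
{
        struct obj *o;

        pthread_mutex_lock(&cache->lock);
        o = cache->free_list;
        if (o)
                cache->free_list = o->next;
        pthread_mutex_unlock(&cache->lock);

        return o ? o : calloc(1, sizeof(*o));   /* allocate on demand */
}

static void obj_put(struct obj_cache *cache, struct obj *o)
{
        pthread_mutex_lock(&cache->lock);
        o->next = cache->free_list;             /* cache for reuse */
        cache->free_list = o;
        pthread_mutex_unlock(&cache->lock);
}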
/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_rw_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
                list_del(&ctxt->rw_list);
                kfree(ctxt);
        }
}

/**
 * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
 * @rdma: controlling transport instance
 * @ctxt: R/W context to prepare
 * @offset: RDMA offset
 * @handle: RDMA tag/handle
 * @direction: I/O direction
 *
 * On success, returns the number of WQEs that will be needed
 * on the work queue; on failure, returns a negative errno.
 */
static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
                                struct svc_rdma_rw_ctxt *ctxt,
                                u64 offset, u32 handle,
                                enum dma_data_direction direction)
{
        int ret;

        ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
                               ctxt->rw_sg_table.sgl, ctxt->rw_nents,
                               0, offset, handle, direction);
        if (unlikely(ret < 0)) {
                svc_rdma_put_rw_ctxt(rdma, ctxt);
                trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
        }
        return ret;
}
/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
        struct rpc_rdma_cid cc_cid;
        struct ib_cqe cc_cqe;
        struct svcxprt_rdma *cc_rdma;
        struct list_head cc_rwctxts;
        int cc_sqecount;
};

static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
                                 struct rpc_rdma_cid *cid)
{
        cid->ci_queue_id = rdma->sc_sq_cq->res.id;
        cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
                             struct svc_rdma_chunk_ctxt *cc)
{
        svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
        cc->cc_rdma = rdma;

        INIT_LIST_HEAD(&cc->cc_rwctxts);
        cc->cc_sqecount = 0;
}

static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
                                enum dma_data_direction dir)
{
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_rw_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
                list_del(&ctxt->rw_list);

                rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
                                    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
                                    ctxt->rw_nents, dir);
                svc_rdma_put_rw_ctxt(rdma, ctxt);
        }
}
/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
        /* write state of this chunk */
        unsigned int wi_seg_off;
        unsigned int wi_seg_no;
        unsigned int wi_nsegs;
        __be32 *wi_segs;

        /* SGL constructor arguments */
        struct xdr_buf *wi_xdr;
        unsigned char *wi_base;
        unsigned int wi_next_off;

        struct svc_rdma_chunk_ctxt wi_cc;
};

static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
        struct svc_rdma_write_info *info;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return info;

        info->wi_seg_off = 0;
        info->wi_seg_no = 0;
        info->wi_nsegs = be32_to_cpup(++chunk);
        info->wi_segs = ++chunk;
        svc_rdma_cc_init(rdma, &info->wi_cc);
        info->wi_cc.cc_cqe.done = svc_rdma_write_done;
        return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
        svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
        kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                        container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_write_info *info =
                        container_of(cc, struct svc_rdma_write_info, wi_cc);

        trace_svcrdma_wc_write(wc, &cc->cc_cid);

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);

        if (unlikely(wc->status != IB_WC_SUCCESS))
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);

        svc_rdma_write_info_free(info);
}
/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
        struct svc_rdma_recv_ctxt *ri_readctxt;
        unsigned int ri_position;
        unsigned int ri_pageno;
        unsigned int ri_pageoff;
        unsigned int ri_chunklen;

        struct svc_rdma_chunk_ctxt ri_cc;
};

static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_read_info *info;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return info;

        svc_rdma_cc_init(rdma, &info->ri_cc);
        info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
        return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
        svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
        kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                        container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_read_info *info =
                        container_of(cc, struct svc_rdma_read_info, ri_cc);

        trace_svcrdma_wc_read(wc, &cc->cc_cid);

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
        } else {
                spin_lock(&rdma->sc_rq_dto_lock);
                list_add_tail(&info->ri_readctxt->rc_list,
                              &rdma->sc_read_complete_q);
                /* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
                set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
                spin_unlock(&rdma->sc_rq_dto_lock);

                svc_xprt_enqueue(&rdma->sc_xprt);
        }

        svc_rdma_read_info_free(info);
}
/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_xprt *xprt = &rdma->sc_xprt;
        struct ib_send_wr *first_wr;
        const struct ib_send_wr *bad_wr;
        struct list_head *tmp;
        struct ib_cqe *cqe;
        int ret;

        if (cc->cc_sqecount > rdma->sc_sq_depth)
                return -EINVAL;

        first_wr = NULL;
        cqe = &cc->cc_cqe;
        list_for_each(tmp, &cc->cc_rwctxts) {
                struct svc_rdma_rw_ctxt *ctxt;

                ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
                first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
                                           rdma->sc_port_num, cqe, first_wr);
                cqe = NULL;
        }

        do {
                if (atomic_sub_return(cc->cc_sqecount,
                                      &rdma->sc_sq_avail) > 0) {
                        trace_svcrdma_post_chunk(&cc->cc_cid, cc->cc_sqecount);
                        ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
                        if (ret)
                                break;
                        return 0;
                }

                trace_svcrdma_sq_full(rdma);
                atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
                wait_event(rdma->sc_send_wait,
                           atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
                trace_svcrdma_sq_retry(rdma);
        } while (1);

        trace_svcrdma_sq_post_err(rdma, ret);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);

        /* If even one was posted, there will be a completion. */
        if (bad_wr != first_wr)
                return 0;

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);
        return -ENOTCONN;
}
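
/* Illustrative, self-contained sketch (not kernel code) of the Send
 * Queue accounting performed by svc_rdma_post_chunk_ctxt() above:
 * reserve the SQEs a chunk needs by subtracting from an available
 * counter; if that would overcommit the queue, return the credits and
 * wait until a completion hands some back. A mutex and condition
 * variable stand in for the kernel's atomics and wait_event()/wake_up(),
 * and the names (sq_credits, sq_reserve, sq_release) are hypothetical.
 */
#include <pthread.h>

struct sq_credits {
        pthread_mutex_t lock;
        pthread_cond_t wait;
        int avail;              /* free Send Queue entries */
};

static void sq_reserve(struct sq_credits *sq, int needed)
{
        pthread_mutex_lock(&sq->lock);
        while (sq->avail - needed <= 0)         /* congested: sleep */
                pthread_cond_wait(&sq->wait, &sq->lock);
        sq->avail -= needed;                    /* credits now owned by caller */
        pthread_mutex_unlock(&sq->lock);
}

/* Called when a Work Completion fires: return credits and wake waiters. */
static void sq_release(struct sq_credits *sq, int count)
{
        pthread_mutex_lock(&sq->lock);
        sq->avail += count;
        pthread_cond_broadcast(&sq->wait);
        pthread_mutex_unlock(&sq->lock);
}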
/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
                               unsigned int len,
                               struct svc_rdma_rw_ctxt *ctxt)
{
        struct scatterlist *sg = ctxt->rw_sg_table.sgl;

        sg_set_buf(&sg[0], info->wi_base, len);
        info->wi_base += len;

        ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
                                    unsigned int remaining,
                                    struct svc_rdma_rw_ctxt *ctxt)
{
        unsigned int sge_no, sge_bytes, page_off, page_no;
        struct xdr_buf *xdr = info->wi_xdr;
        struct scatterlist *sg;
        struct page **page;

        page_off = info->wi_next_off + xdr->page_base;
        page_no = page_off >> PAGE_SHIFT;
        page_off = offset_in_page(page_off);
        page = xdr->pages + page_no;
        info->wi_next_off += remaining;
        sg = ctxt->rw_sg_table.sgl;
        sge_no = 0;
        do {
                sge_bytes = min_t(unsigned int, remaining,
                                  PAGE_SIZE - page_off);
                sg_set_page(sg, *page, sge_bytes, page_off);

                remaining -= sge_bytes;
                sg = sg_next(sg);
                page_off = 0;
                sge_no++;
                page++;
        } while (remaining);

        ctxt->rw_nents = sge_no;
}
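
/* Illustrative, self-contained sketch (not kernel code) of the offset
 * arithmetic svc_rdma_pagelist_to_sg() performs above: a byte offset
 * into a page array is split into a page index and an offset within
 * that page, and the remaining length is then carved into per-page
 * pieces, one "SGE" each. A 4KB page size is assumed, and the macro
 * names below are hypothetical.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT   12
#define EX_PAGE_SIZE    (1u << EX_PAGE_SHIFT)

int main(void)
{
        unsigned int offset = 5000;     /* byte offset into the page list */
        unsigned int remaining = 9000;  /* bytes to map */
        unsigned int page_no = offset >> EX_PAGE_SHIFT;
        unsigned int page_off = offset & (EX_PAGE_SIZE - 1);

        while (remaining) {
                unsigned int bytes = EX_PAGE_SIZE - page_off;

                if (bytes > remaining)
                        bytes = remaining;
                printf("page %u, offset %u, length %u\n",
                       page_no, page_off, bytes);
                remaining -= bytes;
                page_no++;
                page_off = 0;   /* later fragments start at page offset 0 */
        }
        return 0;
}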
/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
                      void (*constructor)(struct svc_rdma_write_info *info,
                                          unsigned int len,
                                          struct svc_rdma_rw_ctxt *ctxt),
                      unsigned int remaining)
{
        struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_rw_ctxt *ctxt;
        __be32 *seg;
        int ret;

        seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
        do {
                unsigned int write_len;
                u32 handle, length;
                u64 offset;

                if (info->wi_seg_no >= info->wi_nsegs)
                        goto out_overflow;

                xdr_decode_rdma_segment(seg, &handle, &length, &offset);
                offset += info->wi_seg_off;

                write_len = min(remaining, length - info->wi_seg_off);
                ctxt = svc_rdma_get_rw_ctxt(rdma,
                                            (write_len >> PAGE_SHIFT) + 2);
                if (!ctxt)
                        return -ENOMEM;

                constructor(info, write_len, ctxt);
                ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle,
                                           DMA_TO_DEVICE);
                if (ret < 0)
                        return -EIO;

                trace_svcrdma_send_wseg(handle, write_len, offset);

                list_add(&ctxt->rw_list, &cc->cc_rwctxts);
                cc->cc_sqecount += ret;
                if (write_len == length - info->wi_seg_off) {
                        seg += 4;
                        info->wi_seg_no++;
                        info->wi_seg_off = 0;
                } else {
                        info->wi_seg_off += write_len;
                }

                remaining -= write_len;
        } while (remaining);

        return 0;

out_overflow:
        trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
                                     info->wi_nsegs);
        return -E2BIG;
}
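
/* Illustrative, self-contained sketch (not kernel code) of the wire
 * layout that xdr_decode_rdma_segment() parses above: an RDMA segment
 * is four XDR words (16 bytes) carrying a 32-bit handle (R_key), a
 * 32-bit length, and a 64-bit offset, all in network byte order, which
 * is why the loop above advances with "seg += 4". The helper name
 * below is hypothetical.
 */
#include <arpa/inet.h>
#include <stdint.h>

static void decode_rdma_segment_example(const uint32_t seg[4],
                                        uint32_t *handle, uint32_t *length,
                                        uint64_t *offset)
{
        *handle = ntohl(seg[0]);
        *length = ntohl(seg[1]);
        /* the 64-bit offset is two big-endian words: high then low */
        *offset = ((uint64_t)ntohl(seg[2]) << 32) | ntohl(seg[3]);
}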
/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
                                  struct kvec *vec)
{
        info->wi_base = vec->iov_base;
        return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
                                     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is just
 * the page list. A Reply chunk is @xdr's head, page list, and
 * tail. This function is shared between the two types of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
                                      struct xdr_buf *xdr,
                                      unsigned int offset,
                                      unsigned long length)
{
        info->wi_xdr = xdr;
        info->wi_next_off = offset - xdr->head[0].iov_len;
        return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
                                     length);
}
/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 * @offset: payload's byte offset in @xdr
 * @length: size of payload, in bytes
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *      %-E2BIG if the payload was larger than the Write chunk,
 *      %-EINVAL if client provided too many segments,
 *      %-ENOMEM if rdma_rw context pool was exhausted,
 *      %-ENOTCONN if posting failed (connection is lost),
 *      %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
                              struct xdr_buf *xdr,
                              unsigned int offset, unsigned long length)
{
        struct svc_rdma_write_info *info;
        int ret;

        if (!length)
                return 0;

        info = svc_rdma_write_info_alloc(rdma, wr_ch);
        if (!info)
                return -ENOMEM;

        ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length);
        if (ret < 0)
                goto out_err;

        ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
        if (ret < 0)
                goto out_err;

        trace_svcrdma_send_write_chunk(xdr->page_len);
        return length;

out_err:
        svc_rdma_write_info_free(info);
        return ret;
}
/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rctxt: Write and Reply chunks from client
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *      %-E2BIG if the payload was larger than the Reply chunk,
 *      %-EINVAL if client provided too many segments,
 *      %-ENOMEM if rdma_rw context pool was exhausted,
 *      %-ENOTCONN if posting failed (connection is lost),
 *      %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
                              const struct svc_rdma_recv_ctxt *rctxt,
                              struct xdr_buf *xdr)
{
        struct svc_rdma_write_info *info;
        int consumed, ret;

        info = svc_rdma_write_info_alloc(rdma, rctxt->rc_reply_chunk);
        if (!info)
                return -ENOMEM;

        ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
        if (ret < 0)
                goto out_err;
        consumed = xdr->head[0].iov_len;

        /* Send the page list in the Reply chunk only if the
         * client did not provide Write chunks.
         */
        if (!rctxt->rc_write_list && xdr->page_len) {
                ret = svc_rdma_send_xdr_pagelist(info, xdr,
                                                 xdr->head[0].iov_len,
                                                 xdr->page_len);
                if (ret < 0)
                        goto out_err;
                consumed += xdr->page_len;
        }

        if (xdr->tail[0].iov_len) {
                ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
                if (ret < 0)
                        goto out_err;
                consumed += xdr->tail[0].iov_len;
        }

        ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
        if (ret < 0)
                goto out_err;

        trace_svcrdma_send_reply_chunk(consumed);
        return consumed;

out_err:
        svc_rdma_write_info_free(info);
        return ret;
}
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
                                       struct svc_rqst *rqstp,
                                       u32 rkey, u32 len, u64 offset)
{
        struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
        struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
        struct svc_rdma_rw_ctxt *ctxt;
        unsigned int sge_no, seg_len;
        struct scatterlist *sg;
        int ret;

        sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
        ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
        if (!ctxt)
                return -ENOMEM;
        ctxt->rw_nents = sge_no;

        sg = ctxt->rw_sg_table.sgl;
        for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
                seg_len = min_t(unsigned int, len,
                                PAGE_SIZE - info->ri_pageoff);

                head->rc_arg.pages[info->ri_pageno] =
                        rqstp->rq_pages[info->ri_pageno];
                if (!info->ri_pageoff)
                        head->rc_page_count++;

                sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
                            seg_len, info->ri_pageoff);
                sg = sg_next(sg);

                info->ri_pageoff += seg_len;
                if (info->ri_pageoff == PAGE_SIZE) {
                        info->ri_pageno++;
                        info->ri_pageoff = 0;
                }
                len -= seg_len;

                /* Safety check */
                if (len &&
                    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
                        goto out_overrun;
        }

        ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
                                   DMA_FROM_DEVICE);
        if (ret < 0)
                return -EIO;

        list_add(&ctxt->rw_list, &cc->cc_rwctxts);
        cc->cc_sqecount += ret;
        return 0;

out_overrun:
        trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
        return -EINVAL;
}
/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
                                     struct svc_rdma_read_info *info,
                                     __be32 *p)
{
        int ret;

        ret = -EINVAL;
        info->ri_chunklen = 0;
        while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
                u32 handle, length;
                u64 offset;

                p = xdr_decode_rdma_segment(p, &handle, &length, &offset);
                ret = svc_rdma_build_read_segment(info, rqstp, handle, length,
                                                  offset);
                if (ret < 0)
                        break;

                trace_svcrdma_send_rseg(handle, length, offset);
                info->ri_chunklen += length;
        }

        return ret;
}
/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->rc_arg.pages.
 *
 * Currently NFSD does not look at the head->rc_arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
                                            struct svc_rdma_read_info *info,
                                            __be32 *p)
{
        struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
        int ret;

        ret = svc_rdma_build_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out;

        trace_svcrdma_send_read_chunk(info->ri_chunklen, info->ri_position);

        head->rc_hdr_count = 0;

        /* Split the Receive buffer between the head and tail
         * buffers at Read chunk's position. XDR roundup of the
         * chunk is not included in either the pagelist or in
         * the tail.
         */
        head->rc_arg.tail[0].iov_base =
                head->rc_arg.head[0].iov_base + info->ri_position;
        head->rc_arg.tail[0].iov_len =
                head->rc_arg.head[0].iov_len - info->ri_position;
        head->rc_arg.head[0].iov_len = info->ri_position;

        /* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
         *
         * If the client already rounded up the chunk length, the
         * length does not change. Otherwise, the length of the page
         * list is increased to include XDR round-up.
         *
         * Currently these chunks always start at page offset 0,
         * thus the rounded-up length never crosses a page boundary.
         */
        info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;

        head->rc_arg.page_len = info->ri_chunklen;
        head->rc_arg.len += info->ri_chunklen;
        head->rc_arg.buflen += info->ri_chunklen;

out:
        return ret;
}
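
/* Illustrative, self-contained sketch (not kernel code) of the XDR
 * round-up applied above: XDR_QUADLEN(len) << 2 rounds a length up to
 * the next multiple of four bytes, equivalent to the familiar
 * (len + 3) & ~3 idiom. The macro name below is hypothetical.
 */
#include <assert.h>

#define EX_XDR_ROUNDUP(len)     ((((len) + 3) / 4) * 4)

int main(void)
{
        assert(EX_XDR_ROUNDUP(0) == 0);
        assert(EX_XDR_ROUNDUP(1) == 4);
        assert(EX_XDR_ROUNDUP(4) == 4);         /* already aligned: unchanged */
        assert(EX_XDR_ROUNDUP(13) == 16);
        return 0;
}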
/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->rc_arg.pages.
 *
 * Assumptions:
 *      - A PZRC has an XDR-aligned length (no implicit round-up).
 *      - There can be no trailing inline content (IOW, we assume
 *        a PZRC is never sent in an RDMA_MSG message, though it's
 *        allowed by spec).
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
                                        struct svc_rdma_read_info *info,
                                        __be32 *p)
{
        struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
        int ret;

        ret = svc_rdma_build_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out;

        trace_svcrdma_send_pzr(info->ri_chunklen);

        head->rc_arg.len += info->ri_chunklen;
        head->rc_arg.buflen += info->ri_chunklen;

        head->rc_hdr_count = 1;
        head->rc_arg.head[0].iov_base = page_address(head->rc_pages[0]);
        head->rc_arg.head[0].iov_len = min_t(size_t, PAGE_SIZE,
                                             info->ri_chunklen);

        head->rc_arg.page_len = info->ri_chunklen -
                        head->rc_arg.head[0].iov_len;

out:
        return ret;
}
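
/* Illustrative, self-contained sketch (not kernel code) of how
 * svc_rdma_build_pz_read_chunk() above apportions a Position Zero Read
 * chunk: the first page's worth of data is presented through the head
 * kvec, and whatever remains becomes the page-list length. A 4KB page
 * size is assumed, and the function name below is hypothetical.
 */
#include <stdio.h>

#define EX_PAGE_SIZE    4096u

static void split_pz_chunk_example(unsigned int chunklen)
{
        unsigned int head_len = chunklen < EX_PAGE_SIZE ?
                                chunklen : EX_PAGE_SIZE;
        unsigned int page_len = chunklen - head_len;

        printf("chunk %u -> head %u, page list %u\n",
               chunklen, head_len, page_len);
}

int main(void)
{
        split_pz_chunk_example(1000);   /* fits entirely in the head kvec */
        split_pz_chunk_example(10000);  /* 4096 in head, 5904 in page list */
        return 0;
}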
/* Pages under I/O have been copied to head->rc_pages. Ensure they
 * are not released by svc_xprt_release() until the I/O is complete.
 *
 * This has to be done after all Read WRs are constructed to properly
 * handle a page that is part of I/O on behalf of two different RDMA
 * segments.
 *
 * Do this only if I/O has been posted. Otherwise, we do indeed want
 * svc_xprt_release() to clean things up properly.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
                                   const unsigned int start,
                                   const unsigned int num_pages)
{
        unsigned int i;

        for (i = start; i < num_pages + start; i++)
                rqstp->rq_pages[i] = NULL;
}
/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *      %0 if all needed RDMA Reads were posted successfully,
 *      %-EINVAL if client provided too many segments,
 *      %-ENOMEM if rdma_rw context pool was exhausted,
 *      %-ENOTCONN if posting failed (connection is lost),
 *      %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Assumptions:
 *      - All Read segments in @p have the same Position value.
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
                             struct svc_rdma_recv_ctxt *head, __be32 *p)
{
        struct svc_rdma_read_info *info;
        int ret;

        /* The request (with page list) is constructed in
         * head->rc_arg. Pages involved with RDMA Read I/O are
         * transferred there.
         */
        head->rc_arg.head[0] = rqstp->rq_arg.head[0];
        head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
        head->rc_arg.pages = head->rc_pages;
        head->rc_arg.page_base = 0;
        head->rc_arg.page_len = 0;
        head->rc_arg.len = rqstp->rq_arg.len;
        head->rc_arg.buflen = rqstp->rq_arg.buflen;

        info = svc_rdma_read_info_alloc(rdma);
        if (!info)
                return -ENOMEM;
        info->ri_readctxt = head;
        info->ri_pageno = 0;
        info->ri_pageoff = 0;

        info->ri_position = be32_to_cpup(p + 1);
        if (info->ri_position)
                ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
        else
                ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out_err;

        ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
        if (ret < 0)
                goto out_err;

        svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
        return 0;

out_err:
        svc_rdma_read_info_free(info);
        return ret;
}