xdr.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/net/sunrpc/xdr.c
  4. *
  5. * Generic XDR support.
  6. *
  7. * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
  8. */
  9. #include <linux/module.h>
  10. #include <linux/slab.h>
  11. #include <linux/types.h>
  12. #include <linux/string.h>
  13. #include <linux/kernel.h>
  14. #include <linux/pagemap.h>
  15. #include <linux/errno.h>
  16. #include <linux/sunrpc/xdr.h>
  17. #include <linux/sunrpc/msg_prot.h>
  18. #include <linux/bvec.h>
  19. #include <trace/events/sunrpc.h>
  20. static void _copy_to_pages(struct page **, size_t, const char *, size_t);
  21. /*
  22. * XDR functions for basic NFS types
  23. */
  24. __be32 *
  25. xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
  26. {
  27. unsigned int quadlen = XDR_QUADLEN(obj->len);
  28. p[quadlen] = 0; /* zero trailing bytes */
  29. *p++ = cpu_to_be32(obj->len);
  30. memcpy(p, obj->data, obj->len);
  31. return p + XDR_QUADLEN(obj->len);
  32. }
  33. EXPORT_SYMBOL_GPL(xdr_encode_netobj);
  34. __be32 *
  35. xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
  36. {
  37. unsigned int len;
  38. if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
  39. return NULL;
  40. obj->len = len;
  41. obj->data = (u8 *) p;
  42. return p + XDR_QUADLEN(len);
  43. }
  44. EXPORT_SYMBOL_GPL(xdr_decode_netobj);
  45. /**
  46. * xdr_encode_opaque_fixed - Encode fixed length opaque data
  47. * @p: pointer to current position in XDR buffer.
  48. * @ptr: pointer to data to encode (or NULL)
  49. * @nbytes: size of data.
  50. *
  51. * Copy the array of data of length nbytes at ptr to the XDR buffer
  52. * at position p, then align to the next 32-bit boundary by padding
  53. * with zero bytes (see RFC1832).
  54. * Note: if ptr is NULL, only the padding is performed.
  55. *
  56. * Returns the updated current XDR buffer position
  57. *
  58. */
  59. __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
  60. {
  61. if (likely(nbytes != 0)) {
  62. unsigned int quadlen = XDR_QUADLEN(nbytes);
  63. unsigned int padding = (quadlen << 2) - nbytes;
  64. if (ptr != NULL)
  65. memcpy(p, ptr, nbytes);
  66. if (padding != 0)
  67. memset((char *)p + nbytes, 0, padding);
  68. p += quadlen;
  69. }
  70. return p;
  71. }
  72. EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
  73. /**
  74. * xdr_encode_opaque - Encode variable length opaque data
  75. * @p: pointer to current position in XDR buffer.
  76. * @ptr: pointer to data to encode (or NULL)
  77. * @nbytes: size of data.
  78. *
  79. * Returns the updated current XDR buffer position
  80. */
  81. __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
  82. {
  83. *p++ = cpu_to_be32(nbytes);
  84. return xdr_encode_opaque_fixed(p, ptr, nbytes);
  85. }
  86. EXPORT_SYMBOL_GPL(xdr_encode_opaque);
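/*
 * Illustrative sketch, not part of the original file: encoding a credential
 * as a counted (variable-length) opaque followed by an 8-byte fixed-length
 * verifier.  The caller is assumed to have reserved enough space at @p; the
 * function and parameter names here are hypothetical.
 */
static inline __be32 *xdr_example_encode_cred(__be32 *p, const void *cred,
					      unsigned int credlen,
					      const void *verf)
{
	/* emits the length word, the data, and zero padding to a 4-byte boundary */
	p = xdr_encode_opaque(p, cred, credlen);
	/* emits only the data plus padding; no length word */
	return xdr_encode_opaque_fixed(p, verf, 8);
}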
  87. __be32 *
  88. xdr_encode_string(__be32 *p, const char *string)
  89. {
  90. return xdr_encode_array(p, string, strlen(string));
  91. }
  92. EXPORT_SYMBOL_GPL(xdr_encode_string);
  93. __be32 *
  94. xdr_decode_string_inplace(__be32 *p, char **sp,
  95. unsigned int *lenp, unsigned int maxlen)
  96. {
  97. u32 len;
  98. len = be32_to_cpu(*p++);
  99. if (len > maxlen)
  100. return NULL;
  101. *lenp = len;
  102. *sp = (char *) p;
  103. return p + XDR_QUADLEN(len);
  104. }
  105. EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
  106. /**
  107. * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
  108. * @buf: XDR buffer where string resides
  109. * @len: length of string, in bytes
  110. *
  111. */
  112. void
  113. xdr_terminate_string(struct xdr_buf *buf, const u32 len)
  114. {
  115. char *kaddr;
  116. kaddr = kmap_atomic(buf->pages[0]);
  117. kaddr[buf->page_base + len] = '\0';
  118. kunmap_atomic(kaddr);
  119. }
  120. EXPORT_SYMBOL_GPL(xdr_terminate_string);
  121. size_t
  122. xdr_buf_pagecount(struct xdr_buf *buf)
  123. {
  124. if (!buf->page_len)
  125. return 0;
  126. return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
  127. }
  128. int
  129. xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
  130. {
  131. size_t i, n = xdr_buf_pagecount(buf);
  132. if (n != 0 && buf->bvec == NULL) {
  133. buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
  134. if (!buf->bvec)
  135. return -ENOMEM;
  136. for (i = 0; i < n; i++) {
  137. buf->bvec[i].bv_page = buf->pages[i];
  138. buf->bvec[i].bv_len = PAGE_SIZE;
  139. buf->bvec[i].bv_offset = 0;
  140. }
  141. }
  142. return 0;
  143. }
  144. void
  145. xdr_free_bvec(struct xdr_buf *buf)
  146. {
  147. kfree(buf->bvec);
  148. buf->bvec = NULL;
  149. }
  150. /**
  151. * xdr_inline_pages - Prepare receive buffer for a large reply
  152. * @xdr: xdr_buf into which reply will be placed
  153. * @offset: expected offset where data payload will start, in bytes
  154. * @pages: vector of struct page pointers
  155. * @base: offset in first page where receive should start, in bytes
  156. * @len: expected size of the upper layer data payload, in bytes
  157. *
  158. */
  159. void
  160. xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
  161. struct page **pages, unsigned int base, unsigned int len)
  162. {
  163. struct kvec *head = xdr->head;
  164. struct kvec *tail = xdr->tail;
  165. char *buf = (char *)head->iov_base;
  166. unsigned int buflen = head->iov_len;
  167. head->iov_len = offset;
  168. xdr->pages = pages;
  169. xdr->page_base = base;
  170. xdr->page_len = len;
  171. tail->iov_base = buf + offset;
  172. tail->iov_len = buflen - offset;
  173. if ((xdr->page_len & 3) == 0)
  174. tail->iov_len -= sizeof(__be32);
  175. xdr->buflen += len;
  176. }
  177. EXPORT_SYMBOL_GPL(xdr_inline_pages);
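/*
 * Illustrative sketch, not part of the original file: a client preparing its
 * receive buffer so that the bulk payload of a large reply lands directly in
 * @pages, while the first @hdrlen bytes of the reply stay in head[0] and any
 * trailing words spill into tail[0].  All names here are hypothetical.
 */
static inline void xdr_example_prepare_reply_pages(struct xdr_buf *rcvbuf,
						   unsigned int hdrlen,
						   struct page **pages,
						   unsigned int count)
{
	xdr_inline_pages(rcvbuf, hdrlen, pages, 0, count);
}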
  178. /*
  179. * Helper routines for doing 'memmove' like operations on a struct xdr_buf
  180. */
  181. /**
  182. * _shift_data_left_pages
  183. * @pages: vector of pages containing both the source and dest memory area.
  184. * @pgto_base: page vector address of destination
  185. * @pgfrom_base: page vector address of source
  186. * @len: number of bytes to copy
  187. *
  188. * Note: the addresses pgto_base and pgfrom_base are both calculated in
  189. * the same way:
  190. * if a memory area starts at byte 'base' in page 'pages[i]',
  191. * then its address is given as (i << PAGE_SHIFT) + base
  192. * Also note: pgto_base must be < pgfrom_base, but the memory areas
  193. * they point to may overlap.
  194. */
  195. static void
  196. _shift_data_left_pages(struct page **pages, size_t pgto_base,
  197. size_t pgfrom_base, size_t len)
  198. {
  199. struct page **pgfrom, **pgto;
  200. char *vfrom, *vto;
  201. size_t copy;
  202. BUG_ON(pgfrom_base <= pgto_base);
  203. pgto = pages + (pgto_base >> PAGE_SHIFT);
  204. pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
  205. pgto_base &= ~PAGE_MASK;
  206. pgfrom_base &= ~PAGE_MASK;
  207. do {
  208. if (pgto_base >= PAGE_SIZE) {
  209. pgto_base = 0;
  210. pgto++;
  211. }
  212. if (pgfrom_base >= PAGE_SIZE) {
  213. pgfrom_base = 0;
  214. pgfrom++;
  215. }
  216. copy = len;
  217. if (copy > (PAGE_SIZE - pgto_base))
  218. copy = PAGE_SIZE - pgto_base;
  219. if (copy > (PAGE_SIZE - pgfrom_base))
  220. copy = PAGE_SIZE - pgfrom_base;
  221. vto = kmap_atomic(*pgto);
  222. if (*pgto != *pgfrom) {
  223. vfrom = kmap_atomic(*pgfrom);
  224. memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
  225. kunmap_atomic(vfrom);
  226. } else
  227. memmove(vto + pgto_base, vto + pgfrom_base, copy);
  228. flush_dcache_page(*pgto);
  229. kunmap_atomic(vto);
  230. pgto_base += copy;
  231. pgfrom_base += copy;
  232. } while ((len -= copy) != 0);
  233. }
  234. static void
  235. _shift_data_left_tail(struct xdr_buf *buf, unsigned int pgto, size_t len)
  236. {
  237. struct kvec *tail = buf->tail;
  238. if (len > tail->iov_len)
  239. len = tail->iov_len;
  240. _copy_to_pages(buf->pages,
  241. buf->page_base + pgto,
  242. (char *)tail->iov_base,
  243. len);
  244. tail->iov_len -= len;
  245. if (tail->iov_len > 0)
  246. memmove((char *)tail->iov_base,
  247. tail->iov_base + len,
  248. tail->iov_len);
  249. }
  250. /**
  251. * _shift_data_right_pages
  252. * @pages: vector of pages containing both the source and dest memory area.
  253. * @pgto_base: page vector address of destination
  254. * @pgfrom_base: page vector address of source
  255. * @len: number of bytes to copy
  256. *
  257. * Note: the addresses pgto_base and pgfrom_base are both calculated in
  258. * the same way:
  259. * if a memory area starts at byte 'base' in page 'pages[i]',
  260. * then its address is given as (i << PAGE_SHIFT) + base
  261. * Also note: pgfrom_base must be < pgto_base, but the memory areas
  262. * they point to may overlap.
  263. */
  264. static void
  265. _shift_data_right_pages(struct page **pages, size_t pgto_base,
  266. size_t pgfrom_base, size_t len)
  267. {
  268. struct page **pgfrom, **pgto;
  269. char *vfrom, *vto;
  270. size_t copy;
  271. BUG_ON(pgto_base <= pgfrom_base);
  272. pgto_base += len;
  273. pgfrom_base += len;
  274. pgto = pages + (pgto_base >> PAGE_SHIFT);
  275. pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
  276. pgto_base &= ~PAGE_MASK;
  277. pgfrom_base &= ~PAGE_MASK;
  278. do {
  279. /* Are any pointers crossing a page boundary? */
  280. if (pgto_base == 0) {
  281. pgto_base = PAGE_SIZE;
  282. pgto--;
  283. }
  284. if (pgfrom_base == 0) {
  285. pgfrom_base = PAGE_SIZE;
  286. pgfrom--;
  287. }
  288. copy = len;
  289. if (copy > pgto_base)
  290. copy = pgto_base;
  291. if (copy > pgfrom_base)
  292. copy = pgfrom_base;
  293. pgto_base -= copy;
  294. pgfrom_base -= copy;
  295. vto = kmap_atomic(*pgto);
  296. if (*pgto != *pgfrom) {
  297. vfrom = kmap_atomic(*pgfrom);
  298. memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
  299. kunmap_atomic(vfrom);
  300. } else
  301. memmove(vto + pgto_base, vto + pgfrom_base, copy);
  302. flush_dcache_page(*pgto);
  303. kunmap_atomic(vto);
  304. } while ((len -= copy) != 0);
  305. }
  306. static unsigned int
  307. _shift_data_right_tail(struct xdr_buf *buf, unsigned int pgfrom, size_t len)
  308. {
  309. struct kvec *tail = buf->tail;
  310. unsigned int tailbuf_len;
  311. unsigned int result = 0;
  312. size_t copy;
  313. tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;
  314. /* Shift the tail first */
  315. if (tailbuf_len != 0) {
  316. unsigned int free_space = tailbuf_len - tail->iov_len;
  317. if (len < free_space)
  318. free_space = len;
  319. if (len > free_space)
  320. len = free_space;
  321. tail->iov_len += free_space;
  322. copy = len;
  323. if (tail->iov_len > len) {
  324. char *p = (char *)tail->iov_base + len;
  325. memmove(p, tail->iov_base, tail->iov_len - free_space);
  326. result += tail->iov_len - free_space;
  327. } else
  328. copy = tail->iov_len;
  329. /* Copy from the inlined pages into the tail */
  330. _copy_from_pages((char *)tail->iov_base,
  331. buf->pages,
  332. buf->page_base + pgfrom,
  333. copy);
  334. result += copy;
  335. }
  336. return result;
  337. }
  338. /**
  339. * _copy_to_pages
  340. * @pages: array of pages
  341. * @pgbase: page vector address of destination
  342. * @p: pointer to source data
  343. * @len: length
  344. *
  345. * Copies data from an arbitrary memory location into an array of pages
  346. * The copy is assumed to be non-overlapping.
  347. */
  348. static void
  349. _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
  350. {
  351. struct page **pgto;
  352. char *vto;
  353. size_t copy;
  354. pgto = pages + (pgbase >> PAGE_SHIFT);
  355. pgbase &= ~PAGE_MASK;
  356. for (;;) {
  357. copy = PAGE_SIZE - pgbase;
  358. if (copy > len)
  359. copy = len;
  360. vto = kmap_atomic(*pgto);
  361. memcpy(vto + pgbase, p, copy);
  362. kunmap_atomic(vto);
  363. len -= copy;
  364. if (len == 0)
  365. break;
  366. pgbase += copy;
  367. if (pgbase == PAGE_SIZE) {
  368. flush_dcache_page(*pgto);
  369. pgbase = 0;
  370. pgto++;
  371. }
  372. p += copy;
  373. }
  374. flush_dcache_page(*pgto);
  375. }
  376. /**
  377. * _copy_from_pages
  378. * @p: pointer to destination
  379. * @pages: array of pages
  380. * @pgbase: offset of source data
  381. * @len: length
  382. *
  383. * Copies data into an arbitrary memory location from an array of pages
  384. * The copy is assumed to be non-overlapping.
  385. */
  386. void
  387. _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
  388. {
  389. struct page **pgfrom;
  390. char *vfrom;
  391. size_t copy;
  392. pgfrom = pages + (pgbase >> PAGE_SHIFT);
  393. pgbase &= ~PAGE_MASK;
  394. do {
  395. copy = PAGE_SIZE - pgbase;
  396. if (copy > len)
  397. copy = len;
  398. vfrom = kmap_atomic(*pgfrom);
  399. memcpy(p, vfrom + pgbase, copy);
  400. kunmap_atomic(vfrom);
  401. pgbase += copy;
  402. if (pgbase == PAGE_SIZE) {
  403. pgbase = 0;
  404. pgfrom++;
  405. }
  406. p += copy;
  407. } while ((len -= copy) != 0);
  408. }
  409. EXPORT_SYMBOL_GPL(_copy_from_pages);
  410. /**
  411. * _zero_pages
  412. * @pages: array of pages
  413. * @pgbase: beginning page vector address
  414. * @len: length
  415. */
  416. static void
  417. _zero_pages(struct page **pages, size_t pgbase, size_t len)
  418. {
  419. struct page **page;
  420. char *vpage;
  421. size_t zero;
  422. page = pages + (pgbase >> PAGE_SHIFT);
  423. pgbase &= ~PAGE_MASK;
  424. do {
  425. zero = PAGE_SIZE - pgbase;
  426. if (zero > len)
  427. zero = len;
  428. vpage = kmap_atomic(*page);
  429. memset(vpage + pgbase, 0, zero);
  430. kunmap_atomic(vpage);
  431. flush_dcache_page(*page);
  432. pgbase = 0;
  433. page++;
  434. } while ((len -= zero) != 0);
  435. }
  436. /**
  437. * xdr_shrink_bufhead
  438. * @buf: xdr_buf
  439. * @len: bytes to remove from buf->head[0]
  440. *
  441. * Shrinks XDR buffer's header kvec buf->head[0] by
  442. * 'len' bytes. The extra data is not lost, but is instead
  443. * moved into the inlined pages and/or the tail.
  444. */
  445. static unsigned int
  446. xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
  447. {
  448. struct kvec *head, *tail;
  449. size_t copy, offs;
  450. unsigned int pglen = buf->page_len;
  451. unsigned int result;
  452. result = 0;
  453. tail = buf->tail;
  454. head = buf->head;
  455. WARN_ON_ONCE(len > head->iov_len);
  456. if (len > head->iov_len)
  457. len = head->iov_len;
  458. /* Shift the tail first */
  459. if (tail->iov_len != 0) {
  460. if (tail->iov_len > len) {
  461. copy = tail->iov_len - len;
  462. memmove((char *)tail->iov_base + len,
  463. tail->iov_base, copy);
  464. result += copy;
  465. }
  466. /* Copy from the inlined pages into the tail */
  467. copy = len;
  468. if (copy > pglen)
  469. copy = pglen;
  470. offs = len - copy;
  471. if (offs >= tail->iov_len)
  472. copy = 0;
  473. else if (copy > tail->iov_len - offs)
  474. copy = tail->iov_len - offs;
  475. if (copy != 0) {
  476. _copy_from_pages((char *)tail->iov_base + offs,
  477. buf->pages,
  478. buf->page_base + pglen + offs - len,
  479. copy);
  480. result += copy;
  481. }
  482. /* Do we also need to copy data from the head into the tail ? */
  483. if (len > pglen) {
  484. offs = copy = len - pglen;
  485. if (copy > tail->iov_len)
  486. copy = tail->iov_len;
  487. memcpy(tail->iov_base,
  488. (char *)head->iov_base +
  489. head->iov_len - offs,
  490. copy);
  491. result += copy;
  492. }
  493. }
  494. /* Now handle pages */
  495. if (pglen != 0) {
  496. if (pglen > len)
  497. _shift_data_right_pages(buf->pages,
  498. buf->page_base + len,
  499. buf->page_base,
  500. pglen - len);
  501. copy = len;
  502. if (len > pglen)
  503. copy = pglen;
  504. _copy_to_pages(buf->pages, buf->page_base,
  505. (char *)head->iov_base + head->iov_len - len,
  506. copy);
  507. result += copy;
  508. }
  509. head->iov_len -= len;
  510. buf->buflen -= len;
  511. /* Have we truncated the message? */
  512. if (buf->len > buf->buflen)
  513. buf->len = buf->buflen;
  514. return result;
  515. }
  516. /**
  517. * xdr_shrink_pagelen - shrinks buf->pages by up to @len bytes
  518. * @buf: xdr_buf
  519. * @len: bytes to remove from buf->pages
  520. *
  521. * The extra data is not lost, but is instead moved into buf->tail.
  522. * Returns the actual number of bytes moved.
  523. */
  524. static unsigned int
  525. xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
  526. {
  527. unsigned int pglen = buf->page_len;
  528. unsigned int result;
  529. if (len > buf->page_len)
  530. len = buf->page_len;
  531. result = _shift_data_right_tail(buf, pglen - len, len);
  532. buf->page_len -= len;
  533. buf->buflen -= len;
  534. /* Have we truncated the message? */
  535. if (buf->len > buf->buflen)
  536. buf->len = buf->buflen;
  537. return result;
  538. }
  539. void
  540. xdr_shift_buf(struct xdr_buf *buf, size_t len)
  541. {
  542. xdr_shrink_bufhead(buf, len);
  543. }
  544. EXPORT_SYMBOL_GPL(xdr_shift_buf);
  545. /**
  546. * xdr_stream_pos - Return the current offset from the start of the xdr_stream
  547. * @xdr: pointer to struct xdr_stream
  548. */
  549. unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
  550. {
  551. return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
  552. }
  553. EXPORT_SYMBOL_GPL(xdr_stream_pos);
  554. /**
  555. * xdr_page_pos - Return the current offset from the start of the xdr pages
  556. * @xdr: pointer to struct xdr_stream
  557. */
  558. unsigned int xdr_page_pos(const struct xdr_stream *xdr)
  559. {
  560. unsigned int pos = xdr_stream_pos(xdr);
  561. WARN_ON(pos < xdr->buf->head[0].iov_len);
  562. return pos - xdr->buf->head[0].iov_len;
  563. }
  564. EXPORT_SYMBOL_GPL(xdr_page_pos);
  565. /**
  566. * xdr_init_encode - Initialize a struct xdr_stream for sending data.
  567. * @xdr: pointer to xdr_stream struct
  568. * @buf: pointer to XDR buffer in which to encode data
  569. * @p: current pointer inside XDR buffer
  570. * @rqst: pointer to controlling rpc_rqst, for debugging
  571. *
  572. * Note: at the moment the RPC client only passes the length of our
  573. * scratch buffer in the xdr_buf's header kvec. Previously this
  574. * meant we needed to call xdr_adjust_iovec() after encoding the
  575. * data. With the new scheme, the xdr_stream manages the details
  576. * of the buffer length, and takes care of adjusting the kvec
  577. * length for us.
  578. */
  579. void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
  580. struct rpc_rqst *rqst)
  581. {
  582. struct kvec *iov = buf->head;
  583. int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
  584. xdr_set_scratch_buffer(xdr, NULL, 0);
  585. BUG_ON(scratch_len < 0);
  586. xdr->buf = buf;
  587. xdr->iov = iov;
  588. xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
  589. xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
  590. BUG_ON(iov->iov_len > scratch_len);
  591. if (p != xdr->p && p != NULL) {
  592. size_t len;
  593. BUG_ON(p < xdr->p || p > xdr->end);
  594. len = (char *)p - (char *)xdr->p;
  595. xdr->p = p;
  596. buf->len += len;
  597. iov->iov_len += len;
  598. }
  599. xdr->rqst = rqst;
  600. }
  601. EXPORT_SYMBOL_GPL(xdr_init_encode);
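/*
 * Illustrative sketch, not part of the original file: typical encode-side
 * setup.  Passing a NULL @p to xdr_init_encode() leaves the stream positioned
 * at the current end of buf->head[0]; the opcode and the helper's name are
 * hypothetical.
 */
static inline int xdr_example_start_encode(struct xdr_stream *xdr,
					   struct xdr_buf *buf, u32 opcode)
{
	__be32 *p;

	xdr_init_encode(xdr, buf, NULL, NULL);
	p = xdr_reserve_space(xdr, sizeof(__be32));
	if (!p)
		return -EMSGSIZE;
	*p = cpu_to_be32(opcode);
	return 0;
}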
  602. /**
  603. * xdr_commit_encode - Ensure all data is written to buffer
  604. * @xdr: pointer to xdr_stream
  605. *
  606. * We handle encoding across page boundaries by giving the caller a
  607. * temporary location to write to, then later copying the data into
  608. * place; xdr_commit_encode does that copying.
  609. *
  610. * Normally the caller doesn't need to call this directly, as the
  611. * following xdr_reserve_space will do it. But an explicit call may be
  612. * required at the end of encoding, or any other time when the xdr_buf
  613. * data might be read.
  614. */
  615. inline void xdr_commit_encode(struct xdr_stream *xdr)
  616. {
  617. int shift = xdr->scratch.iov_len;
  618. void *page;
  619. if (shift == 0)
  620. return;
  621. page = page_address(*xdr->page_ptr);
  622. memcpy(xdr->scratch.iov_base, page, shift);
  623. memmove(page, page + shift, (void *)xdr->p - page);
  624. xdr->scratch.iov_len = 0;
  625. }
  626. EXPORT_SYMBOL_GPL(xdr_commit_encode);
  627. static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
  628. size_t nbytes)
  629. {
  630. __be32 *p;
  631. int space_left;
  632. int frag1bytes, frag2bytes;
  633. if (nbytes > PAGE_SIZE)
  634. goto out_overflow; /* Bigger buffers require special handling */
  635. if (xdr->buf->len + nbytes > xdr->buf->buflen)
  636. goto out_overflow; /* Sorry, we're totally out of space */
  637. frag1bytes = (xdr->end - xdr->p) << 2;
  638. frag2bytes = nbytes - frag1bytes;
  639. if (xdr->iov)
  640. xdr->iov->iov_len += frag1bytes;
  641. else
  642. xdr->buf->page_len += frag1bytes;
  643. xdr->page_ptr++;
  644. xdr->iov = NULL;
  645. /*
  646. * If the last encode didn't end exactly on a page boundary, the
  647. * next one will straddle boundaries. Encode into the next
  648. * page, then copy it back later in xdr_commit_encode. We use
  649. * the "scratch" iov to track any temporarily unused fragment of
  650. * space at the end of the previous buffer:
  651. */
  652. xdr->scratch.iov_base = xdr->p;
  653. xdr->scratch.iov_len = frag1bytes;
  654. p = page_address(*xdr->page_ptr);
  655. /*
  656. * Note this is where the next encode will start after we've
  657. * shifted this one back:
  658. */
  659. xdr->p = (void *)p + frag2bytes;
  660. space_left = xdr->buf->buflen - xdr->buf->len;
  661. xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
  662. xdr->buf->page_len += frag2bytes;
  663. xdr->buf->len += nbytes;
  664. return p;
  665. out_overflow:
  666. trace_rpc_xdr_overflow(xdr, nbytes);
  667. return NULL;
  668. }
  669. /**
  670. * xdr_reserve_space - Reserve buffer space for sending
  671. * @xdr: pointer to xdr_stream
  672. * @nbytes: number of bytes to reserve
  673. *
  674. * Checks that we have enough buffer space to encode 'nbytes' more
  675. * bytes of data. If so, update the total xdr_buf length, and
  676. * adjust the length of the current kvec.
  677. */
  678. __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
  679. {
  680. __be32 *p = xdr->p;
  681. __be32 *q;
  682. xdr_commit_encode(xdr);
  683. /* align nbytes on the next 32-bit boundary */
  684. nbytes += 3;
  685. nbytes &= ~3;
  686. q = p + (nbytes >> 2);
  687. if (unlikely(q > xdr->end || q < p))
  688. return xdr_get_next_encode_buffer(xdr, nbytes);
  689. xdr->p = q;
  690. if (xdr->iov)
  691. xdr->iov->iov_len += nbytes;
  692. else
  693. xdr->buf->page_len += nbytes;
  694. xdr->buf->len += nbytes;
  695. return p;
  696. }
  697. EXPORT_SYMBOL_GPL(xdr_reserve_space);
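/*
 * Illustrative sketch, not part of the original file: reserving space for a
 * counted opaque.  The 4 bytes cover the length word; the data length itself
 * is rounded up to the next 32-bit boundary inside xdr_reserve_space().
 * Names are hypothetical.
 */
static inline int xdr_example_encode_counted_opaque(struct xdr_stream *xdr,
						    const void *data, u32 len)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 + len);
	if (!p)
		return -EMSGSIZE;
	xdr_encode_opaque(p, data, len);
	return 0;
}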
  698. /**
  699. * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
  700. * @xdr: pointer to xdr_stream
  701. * @vec: pointer to a kvec array
  702. * @nbytes: number of bytes to reserve
  703. *
  704. * Reserves enough buffer space to encode 'nbytes' of data and stores the
  705. * pointers in 'vec'. The size argument passed to xdr_reserve_space() is
  706. * determined based on the number of bytes remaining in the current page to
  707. * avoid invalidating iov_base pointers when xdr_commit_encode() is called.
  708. */
  709. int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes)
  710. {
  711. int thislen;
  712. int v = 0;
  713. __be32 *p;
  714. /*
  715. * svcrdma requires every READ payload to start somewhere
  716. * in xdr->pages.
  717. */
  718. if (xdr->iov == xdr->buf->head) {
  719. xdr->iov = NULL;
  720. xdr->end = xdr->p;
  721. }
  722. while (nbytes) {
  723. thislen = xdr->buf->page_len % PAGE_SIZE;
  724. thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);
  725. p = xdr_reserve_space(xdr, thislen);
  726. if (!p)
  727. return -EIO;
  728. vec[v].iov_base = p;
  729. vec[v].iov_len = thislen;
  730. v++;
  731. nbytes -= thislen;
  732. }
  733. return v;
  734. }
  735. EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);
  736. /**
  737. * xdr_truncate_encode - truncate an encode buffer
  738. * @xdr: pointer to xdr_stream
  739. * @len: new length of buffer
  740. *
  741. * Truncates the xdr stream, so that xdr->buf->len == len,
  742. * and xdr->p points at offset len from the start of the buffer, and
  743. * head, tail, and page lengths are adjusted to correspond.
  744. *
  745. * If this means moving xdr->p to a different buffer, we assume that
  746. * the end pointer should be set to the end of the current page,
  747. * except in the case of the head buffer when we assume the head
  748. * buffer's current length represents the end of the available buffer.
  749. *
  750. * This is *not* safe to use on a buffer that already has inlined page
  751. * cache pages (as in a zero-copy server read reply), except for the
  752. * simple case of truncating from one position in the tail to another.
  753. *
  754. */
  755. void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
  756. {
  757. struct xdr_buf *buf = xdr->buf;
  758. struct kvec *head = buf->head;
  759. struct kvec *tail = buf->tail;
  760. int fraglen;
  761. int new;
  762. if (len > buf->len) {
  763. WARN_ON_ONCE(1);
  764. return;
  765. }
  766. xdr_commit_encode(xdr);
  767. fraglen = min_t(int, buf->len - len, tail->iov_len);
  768. tail->iov_len -= fraglen;
  769. buf->len -= fraglen;
  770. if (tail->iov_len) {
  771. xdr->p = tail->iov_base + tail->iov_len;
  772. WARN_ON_ONCE(!xdr->end);
  773. WARN_ON_ONCE(!xdr->iov);
  774. return;
  775. }
  776. WARN_ON_ONCE(fraglen);
  777. fraglen = min_t(int, buf->len - len, buf->page_len);
  778. buf->page_len -= fraglen;
  779. buf->len -= fraglen;
  780. new = buf->page_base + buf->page_len;
  781. xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
  782. if (buf->page_len) {
  783. xdr->p = page_address(*xdr->page_ptr);
  784. xdr->end = (void *)xdr->p + PAGE_SIZE;
  785. xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
  786. WARN_ON_ONCE(xdr->iov);
  787. return;
  788. }
  789. if (fraglen)
  790. xdr->end = head->iov_base + head->iov_len;
  791. /* (otherwise assume xdr->end is already set) */
  792. xdr->page_ptr--;
  793. head->iov_len = len;
  794. buf->len = len;
  795. xdr->p = head->iov_base + head->iov_len;
  796. xdr->iov = buf->head;
  797. }
  798. EXPORT_SYMBOL(xdr_truncate_encode);
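/*
 * Illustrative sketch, not part of the original file: rolling an encode back
 * to a remembered position when a later check fails, as a server might while
 * building a compound reply.  The opcode, the failure condition and the
 * helper's name are hypothetical.
 */
static inline int xdr_example_try_encode(struct xdr_stream *xdr, u32 value)
{
	unsigned int saved_len = xdr->buf->len;	/* position to roll back to */
	__be32 *p;

	p = xdr_reserve_space(xdr, 2 * sizeof(__be32));
	if (!p)
		return -EMSGSIZE;
	*p++ = cpu_to_be32(1);			/* hypothetical opcode */
	*p = cpu_to_be32(value);
	if (value == 0) {
		/* later validation failed: drop everything encoded since saved_len */
		xdr_truncate_encode(xdr, saved_len);
		return -EINVAL;
	}
	return 0;
}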
  799. /**
  800. * xdr_restrict_buflen - decrease available buffer space
  801. * @xdr: pointer to xdr_stream
  802. * @newbuflen: new maximum number of bytes available
  803. *
  804. * Adjust our idea of how much space is available in the buffer.
  805. * If we've already used too much space in the buffer, returns -1.
  806. * If the available space is already smaller than newbuflen, returns 0
  807. * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
  808. * and ensures xdr->end is set at most offset newbuflen from the start
  809. * of the buffer.
  810. */
  811. int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
  812. {
  813. struct xdr_buf *buf = xdr->buf;
  814. int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
  815. int end_offset = buf->len + left_in_this_buf;
  816. if (newbuflen < 0 || newbuflen < buf->len)
  817. return -1;
  818. if (newbuflen > buf->buflen)
  819. return 0;
  820. if (newbuflen < end_offset)
  821. xdr->end = (void *)xdr->end + newbuflen - end_offset;
  822. buf->buflen = newbuflen;
  823. return 0;
  824. }
  825. EXPORT_SYMBOL(xdr_restrict_buflen);
  826. /**
  827. * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
  828. * @xdr: pointer to xdr_stream
  829. * @pages: list of pages
  830. * @base: offset of first byte
  831. * @len: length of data in bytes
  832. *
  833. */
  834. void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
  835. unsigned int len)
  836. {
  837. struct xdr_buf *buf = xdr->buf;
  838. struct kvec *iov = buf->tail;
  839. buf->pages = pages;
  840. buf->page_base = base;
  841. buf->page_len = len;
  842. iov->iov_base = (char *)xdr->p;
  843. iov->iov_len = 0;
  844. xdr->iov = iov;
  845. if (len & 3) {
  846. unsigned int pad = 4 - (len & 3);
  847. BUG_ON(xdr->p >= xdr->end);
  848. iov->iov_base = (char *)xdr->p + (len & 3);
  849. iov->iov_len += pad;
  850. len += pad;
  851. *xdr->p++ = 0;
  852. }
  853. buf->buflen += len;
  854. buf->len += len;
  855. }
  856. EXPORT_SYMBOL_GPL(xdr_write_pages);
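/*
 * Illustrative sketch, not part of the original file: encoding the on-the-wire
 * length word and then splicing a payload held in pages into the send buffer,
 * in the way a WRITE-style encoder might.  Names are hypothetical.
 */
static inline int xdr_example_encode_write_payload(struct xdr_stream *xdr,
						   struct page **pages,
						   unsigned int pgbase,
						   unsigned int count)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(__be32));
	if (!p)
		return -EMSGSIZE;
	*p = cpu_to_be32(count);	/* opaque length on the wire */
	/* the caller's pages become the opaque body */
	xdr_write_pages(xdr, pages, pgbase, count);
	return 0;
}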
  857. static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
  858. unsigned int len)
  859. {
  860. if (len > iov->iov_len)
  861. len = iov->iov_len;
  862. xdr->p = (__be32*)iov->iov_base;
  863. xdr->end = (__be32*)(iov->iov_base + len);
  864. xdr->iov = iov;
  865. xdr->page_ptr = NULL;
  866. }
  867. static int xdr_set_page_base(struct xdr_stream *xdr,
  868. unsigned int base, unsigned int len)
  869. {
  870. unsigned int pgnr;
  871. unsigned int maxlen;
  872. unsigned int pgoff;
  873. unsigned int pgend;
  874. void *kaddr;
  875. maxlen = xdr->buf->page_len;
  876. if (base >= maxlen)
  877. return -EINVAL;
  878. maxlen -= base;
  879. if (len > maxlen)
  880. len = maxlen;
  881. base += xdr->buf->page_base;
  882. pgnr = base >> PAGE_SHIFT;
  883. xdr->page_ptr = &xdr->buf->pages[pgnr];
  884. kaddr = page_address(*xdr->page_ptr);
  885. pgoff = base & ~PAGE_MASK;
  886. xdr->p = (__be32*)(kaddr + pgoff);
  887. pgend = pgoff + len;
  888. if (pgend > PAGE_SIZE)
  889. pgend = PAGE_SIZE;
  890. xdr->end = (__be32*)(kaddr + pgend);
  891. xdr->iov = NULL;
  892. return 0;
  893. }
  894. static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
  895. unsigned int len)
  896. {
  897. if (xdr_set_page_base(xdr, base, len) < 0)
  898. xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
  899. }
  900. static void xdr_set_next_page(struct xdr_stream *xdr)
  901. {
  902. unsigned int newbase;
  903. newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
  904. newbase -= xdr->buf->page_base;
  905. xdr_set_page(xdr, newbase, PAGE_SIZE);
  906. }
  907. static bool xdr_set_next_buffer(struct xdr_stream *xdr)
  908. {
  909. if (xdr->page_ptr != NULL)
  910. xdr_set_next_page(xdr);
  911. else if (xdr->iov == xdr->buf->head) {
  912. xdr_set_page(xdr, 0, PAGE_SIZE);
  913. }
  914. return xdr->p != xdr->end;
  915. }
  916. /**
  917. * xdr_init_decode - Initialize an xdr_stream for decoding data.
  918. * @xdr: pointer to xdr_stream struct
  919. * @buf: pointer to XDR buffer from which to decode data
  920. * @p: current pointer inside XDR buffer
  921. * @rqst: pointer to controlling rpc_rqst, for debugging
  922. */
  923. void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
  924. struct rpc_rqst *rqst)
  925. {
  926. xdr->buf = buf;
  927. xdr->scratch.iov_base = NULL;
  928. xdr->scratch.iov_len = 0;
  929. xdr->nwords = XDR_QUADLEN(buf->len);
  930. if (buf->head[0].iov_len != 0)
  931. xdr_set_iov(xdr, buf->head, buf->len);
  932. else if (buf->page_len != 0)
  933. xdr_set_page_base(xdr, 0, buf->len);
  934. else
  935. xdr_set_iov(xdr, buf->head, buf->len);
  936. if (p != NULL && p > xdr->p && xdr->end >= p) {
  937. xdr->nwords -= p - xdr->p;
  938. xdr->p = p;
  939. }
  940. xdr->rqst = rqst;
  941. }
  942. EXPORT_SYMBOL_GPL(xdr_init_decode);
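/*
 * Illustrative sketch, not part of the original file: decode-side setup on a
 * received reply, pulling a single status word out of the head.  Names are
 * hypothetical.
 */
static inline int xdr_example_decode_status(struct xdr_buf *buf, u32 *status)
{
	struct xdr_stream xdr;
	__be32 *p;

	xdr_init_decode(&xdr, buf, buf->head[0].iov_base, NULL);
	p = xdr_inline_decode(&xdr, sizeof(*status));
	if (!p)
		return -EIO;
	*status = be32_to_cpup(p);
	return 0;
}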
  943. /**
  944. * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
  945. * @xdr: pointer to xdr_stream struct
  946. * @buf: pointer to XDR buffer from which to decode data
  947. * @pages: list of pages to decode into
  948. * @len: length in bytes of buffer in pages
  949. */
  950. void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
  951. struct page **pages, unsigned int len)
  952. {
  953. memset(buf, 0, sizeof(*buf));
  954. buf->pages = pages;
  955. buf->page_len = len;
  956. buf->buflen = len;
  957. buf->len = len;
  958. xdr_init_decode(xdr, buf, NULL, NULL);
  959. }
  960. EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
  961. static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
  962. {
  963. unsigned int nwords = XDR_QUADLEN(nbytes);
  964. __be32 *p = xdr->p;
  965. __be32 *q = p + nwords;
  966. if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
  967. return NULL;
  968. xdr->p = q;
  969. xdr->nwords -= nwords;
  970. return p;
  971. }
  972. /**
  973. * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
  974. * @xdr: pointer to xdr_stream struct
  975. * @buf: pointer to an empty buffer
  976. * @buflen: size of 'buf'
  977. *
  978. * The scratch buffer is used when decoding from an array of pages.
  979. * If an xdr_inline_decode() call spans across page boundaries, then
  980. * we copy the data into the scratch buffer in order to allow linear
  981. * access.
  982. */
  983. void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
  984. {
  985. xdr->scratch.iov_base = buf;
  986. xdr->scratch.iov_len = buflen;
  987. }
  988. EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
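/*
 * Illustrative sketch, not part of the original file: attaching a one-page
 * scratch buffer so that xdr_inline_decode() can return objects that straddle
 * a page boundary in buf->pages.  The page is assumed to be allocated and
 * freed by the caller; the helper's name is hypothetical.
 */
static inline int xdr_example_attach_scratch(struct xdr_stream *xdr,
					     struct page *scratch_page)
{
	if (!scratch_page)
		return -ENOMEM;
	xdr_set_scratch_buffer(xdr, page_address(scratch_page), PAGE_SIZE);
	return 0;
}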
  989. static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
  990. {
  991. __be32 *p;
  992. char *cpdest = xdr->scratch.iov_base;
  993. size_t cplen = (char *)xdr->end - (char *)xdr->p;
  994. if (nbytes > xdr->scratch.iov_len)
  995. goto out_overflow;
  996. p = __xdr_inline_decode(xdr, cplen);
  997. if (p == NULL)
  998. return NULL;
  999. memcpy(cpdest, p, cplen);
  1000. if (!xdr_set_next_buffer(xdr))
  1001. goto out_overflow;
  1002. cpdest += cplen;
  1003. nbytes -= cplen;
  1004. p = __xdr_inline_decode(xdr, nbytes);
  1005. if (p == NULL)
  1006. return NULL;
  1007. memcpy(cpdest, p, nbytes);
  1008. return xdr->scratch.iov_base;
  1009. out_overflow:
  1010. trace_rpc_xdr_overflow(xdr, nbytes);
  1011. return NULL;
  1012. }
  1013. /**
  1014. * xdr_inline_decode - Retrieve XDR data to decode
  1015. * @xdr: pointer to xdr_stream struct
  1016. * @nbytes: number of bytes of data to decode
  1017. *
  1018. * Check if the input buffer is long enough to enable us to decode
  1019. * 'nbytes' more bytes of data starting at the current position.
  1020. * If so return the current pointer, then update the current
  1021. * pointer position.
  1022. */
  1023. __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
  1024. {
  1025. __be32 *p;
  1026. if (unlikely(nbytes == 0))
  1027. return xdr->p;
  1028. if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
  1029. goto out_overflow;
  1030. p = __xdr_inline_decode(xdr, nbytes);
  1031. if (p != NULL)
  1032. return p;
  1033. return xdr_copy_to_scratch(xdr, nbytes);
  1034. out_overflow:
  1035. trace_rpc_xdr_overflow(xdr, nbytes);
  1036. return NULL;
  1037. }
  1038. EXPORT_SYMBOL_GPL(xdr_inline_decode);
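/*
 * Illustrative sketch, not part of the original file: decoding a counted
 * opaque.  The pointer returned by xdr_inline_decode() may address the
 * scratch buffer rather than the underlying pages, so the data is copied out
 * before the next decode call.  Returns the number of bytes copied or a
 * negative errno; names are hypothetical.
 */
static inline int xdr_example_decode_opaque(struct xdr_stream *xdr,
					    void *dst, u32 maxlen)
{
	__be32 *p;
	u32 len;

	p = xdr_inline_decode(xdr, sizeof(len));
	if (!p)
		return -EIO;
	len = be32_to_cpup(p);
	if (len > maxlen)
		return -EINVAL;
	p = xdr_inline_decode(xdr, len);
	if (!p)
		return -EIO;
	memcpy(dst, p, len);
	return len;
}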
  1039. static void xdr_realign_pages(struct xdr_stream *xdr)
  1040. {
  1041. struct xdr_buf *buf = xdr->buf;
  1042. struct kvec *iov = buf->head;
  1043. unsigned int cur = xdr_stream_pos(xdr);
  1044. unsigned int copied, offset;
  1045. /* Realign pages to current pointer position */
  1046. if (iov->iov_len > cur) {
  1047. offset = iov->iov_len - cur;
  1048. copied = xdr_shrink_bufhead(buf, offset);
  1049. trace_rpc_xdr_alignment(xdr, offset, copied);
  1050. xdr->nwords = XDR_QUADLEN(buf->len - cur);
  1051. }
  1052. }
  1053. static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
  1054. {
  1055. struct xdr_buf *buf = xdr->buf;
  1056. unsigned int nwords = XDR_QUADLEN(len);
  1057. unsigned int cur = xdr_stream_pos(xdr);
  1058. unsigned int copied, offset;
  1059. if (xdr->nwords == 0)
  1060. return 0;
  1061. xdr_realign_pages(xdr);
  1062. if (nwords > xdr->nwords) {
  1063. nwords = xdr->nwords;
  1064. len = nwords << 2;
  1065. }
  1066. if (buf->page_len <= len)
  1067. len = buf->page_len;
  1068. else if (nwords < xdr->nwords) {
  1069. /* Truncate page data and move it into the tail */
  1070. offset = buf->page_len - len;
  1071. copied = xdr_shrink_pagelen(buf, offset);
  1072. trace_rpc_xdr_alignment(xdr, offset, copied);
  1073. xdr->nwords = XDR_QUADLEN(buf->len - cur);
  1074. }
  1075. return len;
  1076. }
  1077. /**
  1078. * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
  1079. * @xdr: pointer to xdr_stream struct
  1080. * @len: number of bytes of page data
  1081. *
  1082. * Moves data beyond the current pointer position from the XDR head[] buffer
  1083. * into the page list. Any data that lies beyond current position + "len"
  1084. * bytes is moved into the XDR tail[].
  1085. *
  1086. * Returns the number of XDR encoded bytes now contained in the pages
  1087. */
  1088. unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
  1089. {
  1090. struct xdr_buf *buf = xdr->buf;
  1091. struct kvec *iov;
  1092. unsigned int nwords;
  1093. unsigned int end;
  1094. unsigned int padding;
  1095. len = xdr_align_pages(xdr, len);
  1096. if (len == 0)
  1097. return 0;
  1098. nwords = XDR_QUADLEN(len);
  1099. padding = (nwords << 2) - len;
  1100. xdr->iov = iov = buf->tail;
  1101. /* Compute remaining message length. */
  1102. end = ((xdr->nwords - nwords) << 2) + padding;
  1103. if (end > iov->iov_len)
  1104. end = iov->iov_len;
  1105. /*
  1106. * Position current pointer at beginning of tail, and
  1107. * set remaining message length.
  1108. */
  1109. xdr->p = (__be32 *)((char *)iov->iov_base + padding);
  1110. xdr->end = (__be32 *)((char *)iov->iov_base + end);
  1111. xdr->page_ptr = NULL;
  1112. xdr->nwords = XDR_QUADLEN(end - padding);
  1113. return len;
  1114. }
  1115. EXPORT_SYMBOL_GPL(xdr_read_pages);
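/*
 * Illustrative sketch, not part of the original file: decoding the byte count
 * of a READ-style reply and then aligning the opaque data so it starts in
 * buf->pages, with any excess moved into the tail.  Names are hypothetical.
 */
static inline int xdr_example_decode_read_reply(struct xdr_stream *xdr,
						u32 *countp)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*countp));
	if (!p)
		return -EIO;
	*countp = be32_to_cpup(p);
	/* returns how many encoded bytes actually ended up in the pages */
	*countp = xdr_read_pages(xdr, *countp);
	return 0;
}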
  1116. uint64_t xdr_align_data(struct xdr_stream *xdr, uint64_t offset, uint32_t length)
  1117. {
  1118. struct xdr_buf *buf = xdr->buf;
  1119. unsigned int from, bytes;
  1120. unsigned int shift = 0;
  1121. if ((offset + length) < offset ||
  1122. (offset + length) > buf->page_len)
  1123. length = buf->page_len - offset;
  1124. xdr_realign_pages(xdr);
  1125. from = xdr_page_pos(xdr);
  1126. bytes = xdr->nwords << 2;
  1127. if (length < bytes)
  1128. bytes = length;
  1129. /* Move page data to the left */
  1130. if (from > offset) {
  1131. shift = min_t(unsigned int, bytes, buf->page_len - from);
  1132. _shift_data_left_pages(buf->pages,
  1133. buf->page_base + offset,
  1134. buf->page_base + from,
  1135. shift);
  1136. bytes -= shift;
  1137. /* Move tail data into the pages, if necessary */
  1138. if (bytes > 0)
  1139. _shift_data_left_tail(buf, offset + shift, bytes);
  1140. }
  1141. xdr->nwords -= XDR_QUADLEN(length);
  1142. xdr_set_page(xdr, from + length, PAGE_SIZE);
  1143. return length;
  1144. }
  1145. EXPORT_SYMBOL_GPL(xdr_align_data);
  1146. uint64_t xdr_expand_hole(struct xdr_stream *xdr, uint64_t offset, uint64_t length)
  1147. {
  1148. struct xdr_buf *buf = xdr->buf;
  1149. unsigned int bytes;
  1150. unsigned int from;
  1151. unsigned int truncated = 0;
  1152. if ((offset + length) < offset ||
  1153. (offset + length) > buf->page_len)
  1154. length = buf->page_len - offset;
  1155. xdr_realign_pages(xdr);
  1156. from = xdr_page_pos(xdr);
  1157. bytes = xdr->nwords << 2;
  1158. if (offset + length + bytes > buf->page_len) {
  1159. unsigned int shift = (offset + length + bytes) - buf->page_len;
  1160. unsigned int res = _shift_data_right_tail(buf, from + bytes - shift, shift);
  1161. truncated = shift - res;
  1162. xdr->nwords -= XDR_QUADLEN(truncated);
  1163. bytes -= shift;
  1164. }
  1165. /* Now move the page data over and zero pages */
  1166. if (bytes > 0)
  1167. _shift_data_right_pages(buf->pages,
  1168. buf->page_base + offset + length,
  1169. buf->page_base + from,
  1170. bytes);
  1171. _zero_pages(buf->pages, buf->page_base + offset, length);
  1172. buf->len += length - (from - offset) - truncated;
  1173. xdr_set_page(xdr, offset + length, PAGE_SIZE);
  1174. return length;
  1175. }
  1176. EXPORT_SYMBOL_GPL(xdr_expand_hole);
  1177. /**
  1178. * xdr_enter_page - decode data from the XDR page
  1179. * @xdr: pointer to xdr_stream struct
  1180. * @len: number of bytes of page data
  1181. *
  1182. * Moves data beyond the current pointer position from the XDR head[] buffer
  1183. * into the page list. Any data that lies beyond current position + "len"
  1184. * bytes is moved into the XDR tail[]. The current pointer is then
  1185. * repositioned at the beginning of the first XDR page.
  1186. */
  1187. void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
  1188. {
  1189. len = xdr_align_pages(xdr, len);
  1190. /*
  1191. * Position current pointer at beginning of tail, and
  1192. * set remaining message length.
  1193. */
  1194. if (len != 0)
  1195. xdr_set_page_base(xdr, 0, len);
  1196. }
  1197. EXPORT_SYMBOL_GPL(xdr_enter_page);
  1198. static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
  1199. void
  1200. xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
  1201. {
  1202. buf->head[0] = *iov;
  1203. buf->tail[0] = empty_iov;
  1204. buf->page_len = 0;
  1205. buf->buflen = buf->len = iov->iov_len;
  1206. }
  1207. EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
  1208. /**
  1209. * xdr_buf_subsegment - set subbuf to a portion of buf
  1210. * @buf: an xdr buffer
  1211. * @subbuf: the result buffer
  1212. * @base: beginning of range in bytes
  1213. * @len: length of range in bytes
  1214. *
  1215. * sets @subbuf to an xdr buffer representing the portion of @buf of
  1216. * length @len starting at offset @base.
  1217. *
  1218. * @buf and @subbuf may be pointers to the same struct xdr_buf.
  1219. *
  1220. * Returns -1 if base or length are out of bounds.
  1221. */
  1222. int
  1223. xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
  1224. unsigned int base, unsigned int len)
  1225. {
  1226. subbuf->buflen = subbuf->len = len;
  1227. if (base < buf->head[0].iov_len) {
  1228. subbuf->head[0].iov_base = buf->head[0].iov_base + base;
  1229. subbuf->head[0].iov_len = min_t(unsigned int, len,
  1230. buf->head[0].iov_len - base);
  1231. len -= subbuf->head[0].iov_len;
  1232. base = 0;
  1233. } else {
  1234. base -= buf->head[0].iov_len;
  1235. subbuf->head[0].iov_base = buf->head[0].iov_base;
  1236. subbuf->head[0].iov_len = 0;
  1237. }
  1238. if (base < buf->page_len) {
  1239. subbuf->page_len = min(buf->page_len - base, len);
  1240. base += buf->page_base;
  1241. subbuf->page_base = base & ~PAGE_MASK;
  1242. subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
  1243. len -= subbuf->page_len;
  1244. base = 0;
  1245. } else {
  1246. base -= buf->page_len;
  1247. subbuf->pages = buf->pages;
  1248. subbuf->page_base = 0;
  1249. subbuf->page_len = 0;
  1250. }
  1251. if (base < buf->tail[0].iov_len) {
  1252. subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
  1253. subbuf->tail[0].iov_len = min_t(unsigned int, len,
  1254. buf->tail[0].iov_len - base);
  1255. len -= subbuf->tail[0].iov_len;
  1256. base = 0;
  1257. } else {
  1258. base -= buf->tail[0].iov_len;
  1259. subbuf->tail[0].iov_base = buf->tail[0].iov_base;
  1260. subbuf->tail[0].iov_len = 0;
  1261. }
  1262. if (base || len)
  1263. return -1;
  1264. return 0;
  1265. }
  1266. EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
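/*
 * Illustrative sketch, not part of the original file: carving out the region
 * of a buffer that covers one argument so it can be processed (for example,
 * checksummed) on its own.  The subsegment may share head, page and tail
 * storage with the parent buffer; names are hypothetical.
 */
static inline int xdr_example_extract_region(struct xdr_buf *buf,
					     struct xdr_buf *region,
					     unsigned int offset,
					     unsigned int nbytes)
{
	if (xdr_buf_subsegment(buf, region, offset, nbytes))
		return -ERANGE;
	return 0;
}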
  1267. /**
  1268. * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
  1269. * @buf: buf to be trimmed
  1270. * @len: number of bytes to reduce "buf" by
  1271. *
  1272. * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
  1273. * that it's possible that we'll trim less than that amount if the xdr_buf is
  1274. * too small, or if (for instance) it's all in the head and the parser has
  1275. * already read too far into it.
  1276. */
  1277. void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
  1278. {
  1279. size_t cur;
  1280. unsigned int trim = len;
  1281. if (buf->tail[0].iov_len) {
  1282. cur = min_t(size_t, buf->tail[0].iov_len, trim);
  1283. buf->tail[0].iov_len -= cur;
  1284. trim -= cur;
  1285. if (!trim)
  1286. goto fix_len;
  1287. }
  1288. if (buf->page_len) {
  1289. cur = min_t(unsigned int, buf->page_len, trim);
  1290. buf->page_len -= cur;
  1291. trim -= cur;
  1292. if (!trim)
  1293. goto fix_len;
  1294. }
  1295. if (buf->head[0].iov_len) {
  1296. cur = min_t(size_t, buf->head[0].iov_len, trim);
  1297. buf->head[0].iov_len -= cur;
  1298. trim -= cur;
  1299. }
  1300. fix_len:
  1301. buf->len -= (len - trim);
  1302. }
  1303. EXPORT_SYMBOL_GPL(xdr_buf_trim);
  1304. static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
  1305. {
  1306. unsigned int this_len;
  1307. this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
  1308. memcpy(obj, subbuf->head[0].iov_base, this_len);
  1309. len -= this_len;
  1310. obj += this_len;
  1311. this_len = min_t(unsigned int, len, subbuf->page_len);
  1312. if (this_len)
  1313. _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
  1314. len -= this_len;
  1315. obj += this_len;
  1316. this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
  1317. memcpy(obj, subbuf->tail[0].iov_base, this_len);
  1318. }
  1319. /* obj is assumed to point to allocated memory of size at least len: */
  1320. int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
  1321. {
  1322. struct xdr_buf subbuf;
  1323. int status;
  1324. status = xdr_buf_subsegment(buf, &subbuf, base, len);
  1325. if (status != 0)
  1326. return status;
  1327. __read_bytes_from_xdr_buf(&subbuf, obj, len);
  1328. return 0;
  1329. }
  1330. EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
  1331. static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
  1332. {
  1333. unsigned int this_len;
  1334. this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
  1335. memcpy(subbuf->head[0].iov_base, obj, this_len);
  1336. len -= this_len;
  1337. obj += this_len;
  1338. this_len = min_t(unsigned int, len, subbuf->page_len);
  1339. if (this_len)
  1340. _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
  1341. len -= this_len;
  1342. obj += this_len;
  1343. this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
  1344. memcpy(subbuf->tail[0].iov_base, obj, this_len);
  1345. }
  1346. /* obj is assumed to point to allocated memory of size at least len: */
  1347. int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
  1348. {
  1349. struct xdr_buf subbuf;
  1350. int status;
  1351. status = xdr_buf_subsegment(buf, &subbuf, base, len);
  1352. if (status != 0)
  1353. return status;
  1354. __write_bytes_to_xdr_buf(&subbuf, obj, len);
  1355. return 0;
  1356. }
  1357. EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
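/*
 * Illustrative sketch, not part of the original file: copying a fixed-size
 * verifier out of one buffer and into another at arbitrary byte offsets,
 * regardless of whether the bytes live in the head, the pages or the tail.
 * Names are hypothetical.
 */
static inline int xdr_example_copy_verifier(struct xdr_buf *rcv,
					    unsigned int src,
					    struct xdr_buf *snd,
					    unsigned int dst)
{
	u8 verf[8];
	int err;

	err = read_bytes_from_xdr_buf(rcv, src, verf, sizeof(verf));
	if (err)
		return err;
	return write_bytes_to_xdr_buf(snd, dst, verf, sizeof(verf));
}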
  1358. int
  1359. xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
  1360. {
  1361. __be32 raw;
  1362. int status;
  1363. status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
  1364. if (status)
  1365. return status;
  1366. *obj = be32_to_cpu(raw);
  1367. return 0;
  1368. }
  1369. EXPORT_SYMBOL_GPL(xdr_decode_word);
  1370. int
  1371. xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
  1372. {
  1373. __be32 raw = cpu_to_be32(obj);
  1374. return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
  1375. }
  1376. EXPORT_SYMBOL_GPL(xdr_encode_word);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
				   avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
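
/* Illustrative sketch (not part of the original source): decoding a counted
 * array of 4-byte elements with xdr_decode_array2().  The accumulator struct
 * and callback are hypothetical; desc->xcode is called once per element with
 * @elem pointing at elem_size contiguous bytes, even when that element
 * straddled a segment boundary in the underlying xdr_buf. */
struct example_sum_state {
	struct xdr_array2_desc	desc;	/* must be first so the cast below is valid */
	u32			sum;
};

static int example_sum_elem(struct xdr_array2_desc *desc, void *elem)
{
	struct example_sum_state *st = (struct example_sum_state *)desc;

	st->sum += be32_to_cpup((__be32 *)elem);
	return 0;			/* non-zero aborts the walk */
}

static int example_sum_array(struct xdr_buf *buf, unsigned int base, u32 *sum)
{
	struct example_sum_state st = {
		.desc = {
			.elem_size	= 4,
			.array_maxlen	= 256,	/* hypothetical sanity cap */
			.xcode		= example_sum_elem,
		},
	};
	int err;

	err = xdr_decode_array2(buf, base, &st.desc);
	if (err == 0)
		*sum = st.sum;
	return err;
}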

/* Pass the region [@offset, @offset + @len) of @buf to @actor one segment at
 * a time, as a single-entry scatterlist: first any bytes in the head kvec,
 * then whole or partial pages, then any bytes in the tail kvec.  A non-zero
 * return from @actor aborts the walk; -EINVAL is returned if the requested
 * region extends past the end of the buffer. */
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
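
/* Illustrative sketch (not part of the original source): a hypothetical actor
 * that merely totals the bytes handed to it.  Real callers typically feed
 * each scatterlist entry into a hash or cipher. */
static int example_count_sg(struct scatterlist *sg, void *data)
{
	unsigned int *total = data;

	*total += sg->length;
	return 0;	/* returning non-zero would stop xdr_process_buf() */
}

static int example_measure(struct xdr_buf *buf, unsigned int *total)
{
	*total = 0;
	return xdr_process_buf(buf, 0, buf->len, example_count_sg, total);
}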

/**
 * xdr_stream_decode_opaque - Decode variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store opaque data
 * @size: size of storage buffer @ptr
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @ptr
 */
ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret <= 0)
		return ret;
	memcpy(ptr, p, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
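
/* Illustrative sketch (not part of the original source): decoding an opaque
 * body into a fixed on-stack buffer.  The 16-byte limit is hypothetical. */
static ssize_t example_decode_cookie(struct xdr_stream *xdr)
{
	u8 cookie[16];

	/* Returns the object length on success, -EMSGSIZE if the encoded
	 * object is larger than 16 bytes, or -EBADMSG if the stream itself
	 * is truncated. */
	return xdr_stream_decode_opaque(xdr, cookie, sizeof(cookie));
}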

/**
 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store pointer to opaque data
 * @maxlen: maximum acceptable object size
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the object would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
		size_t maxlen, gfp_t gfp_flags)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		*ptr = kmemdup(p, ret, gfp_flags);
		if (*ptr != NULL)
			return ret;

		ret = -ENOMEM;
	}
	*ptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
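
/* Illustrative sketch (not part of the original source): duplicating an
 * opaque object into freshly allocated memory.  The caller owns the copy and
 * must kfree() it; the 1024-byte ceiling is hypothetical. */
static int example_use_opaque(struct xdr_stream *xdr)
{
	void *data;
	ssize_t len;

	len = xdr_stream_decode_opaque_dup(xdr, &data, 1024, GFP_KERNEL);
	if (len < 0)
		return len;	/* -EBADMSG, -EMSGSIZE or -ENOMEM; data is NULL */

	/* ... consume the len bytes at data ... */
	kfree(data);
	return 0;
}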

/**
 * xdr_stream_decode_string - Decode variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store string
 * @size: size of storage buffer @str
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @str
 */
ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret > 0) {
		memcpy(str, p, ret);
		str[ret] = '\0';
		return strlen(str);
	}
	*str = '\0';
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
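
/* Illustrative sketch (not part of the original source): decoding an XDR
 * string into a fixed buffer.  The 64-byte buffer is hypothetical; note that
 * a NUL is appended after the decoded bytes, so passing sizeof(name) - 1
 * keeps the terminator inside the buffer even for a maximum-length string. */
static ssize_t example_decode_name(struct xdr_stream *xdr)
{
	char name[64];

	return xdr_stream_decode_string(xdr, name, sizeof(name) - 1);
}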

/**
 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store pointer to string
 * @maxlen: maximum acceptable string length
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the string would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
		size_t maxlen, gfp_t gfp_flags)
{
	void *p;
	ssize_t ret;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		char *s = kmalloc(ret + 1, gfp_flags);

		if (s != NULL) {
			memcpy(s, p, ret);
			s[ret] = '\0';
			*str = s;
			return strlen(s);
		}
		ret = -ENOMEM;
	}
	*str = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
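
/* Illustrative sketch (not part of the original source): duplicating a string
 * into a kmalloc'd, NUL-terminated copy that the caller must kfree().  The
 * 255-byte limit is hypothetical. */
static int example_dup_label(struct xdr_stream *xdr, char **label)
{
	ssize_t len;

	len = xdr_stream_decode_string_dup(xdr, label, 255, GFP_KERNEL);
	if (len < 0)
		return len;	/* *label is NULL on failure */
	return 0;
}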