/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);
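
/*
 * Illustrative sketch, not part of the original xdr.c: how a caller
 * might use xdr_encode_opaque() to emit a counted opaque item.  The
 * function and data below are hypothetical; a real caller obtains 'p'
 * from xdr_reserve_space() or the head kvec and must guarantee room
 * for the length word plus the padded data (12 bytes here).
 */
static inline __be32 *example_encode_cookie(__be32 *p)
{
	static const u8 cookie[5] = { 1, 2, 3, 4, 5 };

	/* Emits the 4-byte length (5), the five data bytes, and three
	 * zero pad bytes; returns the position just past the padding. */
	return xdr_encode_opaque(p, cookie, sizeof(cookie));
}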
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
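
/*
 * Illustrative sketch, not part of the original xdr.c: attaching page
 * data to an xdr_buf as a READ-style reply body.  Here 'pages' is
 * assumed to hold 'count' payload bytes starting at offset 0, with the
 * reply header already encoded into head[0].
 */
static inline void example_attach_read_data(struct xdr_buf *xdr,
					    struct page **pages,
					    unsigned int count)
{
	/* Appends the pages after head[0]; when count is not a multiple
	 * of four, tail[0] is pointed at a zeroed pad so the message
	 * stays 32-bit aligned on the wire. */
	xdr_encode_pages(xdr, pages, 0, count);
}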
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}
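
/*
 * Worked example of the page vector addressing used above, assuming
 * PAGE_CACHE_SHIFT == 12 (4 KB pages): byte 100 of pages[2] has the
 * address (2 << 12) + 100 = 8292, so 8292 >> PAGE_CACHE_SHIFT recovers
 * the page index 2 and 8292 & ~PAGE_CACHE_MASK recovers the offset 100.
 */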
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);
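
/*
 * Illustrative sketch, not part of the original xdr.c: the typical
 * encode sequence.  An encode routine initialises the stream over the
 * request buffer, reserves room, and writes at the returned position.
 * The function, its arguments, and the encoded values are hypothetical.
 */
static inline int example_encode_call(struct xdr_buf *buf, __be32 *start)
{
	struct xdr_stream xdr;
	__be32 *p;

	xdr_init_encode(&xdr, buf, start);
	p = xdr_reserve_space(&xdr, 8);	/* two 32-bit words */
	if (p == NULL)
		return -EMSGSIZE;	/* no room left before xdr.end */
	*p++ = htonl(42);		/* hypothetical argument */
	*p++ = htonl(0);
	return 0;
}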
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;

	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (__be32 *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);
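
/*
 * Illustrative sketch, not part of the original xdr.c: the matching
 * decode sequence.  xdr_inline_decode() returns NULL when fewer than
 * 'nbytes' bytes remain before xdr->end, so every call must be
 * checked.  The function and its error codes are hypothetical.
 */
static inline int example_decode_reply(struct xdr_buf *buf, __be32 *start,
				       u32 *value)
{
	struct xdr_stream xdr;
	__be32 *p;

	xdr_init_decode(&xdr, buf, start);
	p = xdr_inline_decode(&xdr, 4);	/* one 32-bit word */
	if (p == NULL)
		return -EIO;		/* short or truncated reply */
	*value = ntohl(*p);
	return 0;
}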
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov  = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	char * kaddr = page_address(xdr->buf->pages[0]);

	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
	xdr->end = (__be32 *)((char *)xdr->p + len);
}
EXPORT_SYMBOL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length is out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
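
/*
 * Illustrative sketch, not part of the original xdr.c: carving out a
 * sub-buffer, e.g. to address an opaque body that may be split across
 * head, pages and tail.  The offsets are hypothetical; the helpers
 * read_bytes_from_xdr_buf()/write_bytes_to_xdr_buf() below compose
 * this call with a flat copy.
 */
static inline int example_subsegment(struct xdr_buf *buf)
{
	struct xdr_buf body;

	/* 16 bytes starting 4 bytes into the buffer; returns -1 if the
	 * requested range extends past the end of 'buf'. */
	return xdr_buf_subsegment(buf, &body, 4, 16);
}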
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
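
/*
 * Illustrative sketch, not part of the original xdr.c: the two word
 * helpers form a round trip.  Byte-order conversion happens inside
 * them, so callers deal only in host-order u32 values.  The function
 * and the value 42 are hypothetical.
 */
static inline int example_word_roundtrip(struct xdr_buf *buf,
					 unsigned int base)
{
	u32 out;
	int err;

	err = xdr_encode_word(buf, base, 42);	/* stores htonl(42) */
	if (err)
		return err;
	err = xdr_decode_word(buf, base, &out);	/* out == 42 again */
	if (err)
		return err;
	return out == 42 ? 0 : -EIO;
}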
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
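
/*
 * Illustrative sketch, not part of the original xdr.c: decoding a
 * uniform array with xdr_decode_array2().  The xcode callback is
 * invoked once per element with a pointer to elem_size contiguous
 * bytes, even when the element straddles a page boundary.  The
 * callback, its validity check, and the maxlen bound are hypothetical.
 */
static int example_xcode_u32(struct xdr_array2_desc *desc, void *elem)
{
	u32 value = ntohl(*(__be32 *) elem);

	return value != 0 ? 0 : -EINVAL;	/* hypothetical check */
}

static inline int example_decode_u32_array(struct xdr_buf *buf,
					   unsigned int base)
{
	struct xdr_array2_desc desc = {
		.elem_size    = 4,
		.array_maxlen = 64,		/* hypothetical bound */
		.xcode        = example_xcode_u32,
	};

	/* array_len is filled in from the on-the-wire count. */
	return xdr_decode_array2(buf, base, &desc);
}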
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned page_len, thislen, page_offset;
	struct scatterlist	sg[1];

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg->page = buf->pages[i];
			sg->offset = page_offset;
			sg->length = thislen;
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;

	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL(xdr_process_buf);
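
/*
 * Illustrative sketch, not part of the original xdr.c: an actor for
 * xdr_process_buf().  The actor runs once per contiguous region of the
 * buffer; this hypothetical one merely sums the region lengths, as in
 *	xdr_process_buf(buf, 0, buf->len, example_count_actor, &total);
 */
static int example_count_actor(struct scatterlist *sg, void *data)
{
	unsigned int *total = data;

	*total += sg->length;
	return 0;	/* a non-zero return would abort the walk */
}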