/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Berkeley style UIO structures - Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
        void *iov_base; /* and that should *never* hold a userland pointer */
        size_t iov_len;
};
enum iter_type {
        /* iter types */
        ITER_IOVEC = 4,
        ITER_KVEC = 8,
        ITER_BVEC = 16,
        ITER_PIPE = 32,
        ITER_DISCARD = 64,
};
struct iov_iter {
        /*
         * Bit 0 is the read/write bit, set if we're writing.
         * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
         * the caller isn't expecting to drop a page reference when done.
         */
        unsigned int type;
        size_t iov_offset;
        size_t count;
        union {
                const struct iovec *iov;
                const struct kvec *kvec;
                const struct bio_vec *bvec;
                struct pipe_inode_info *pipe;
        };
        union {
                unsigned long nr_segs;
                struct {
                        unsigned int head;
                        unsigned int start_head;
                };
        };
};
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
        return i->type & ~(READ | WRITE);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_DISCARD;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
        return i->type & (READ | WRITE);
}
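
/*
 * Illustrative sketch (not part of this header): the predicates above are
 * how callers branch on iterator flavor and direction without poking at
 * i->type directly. example_wants_user_pages() is a hypothetical helper:
 *
 *	static bool example_wants_user_pages(const struct iov_iter *i)
 *	{
 *		if (iov_iter_rw(i) != READ)
 *			return false;
 *		return iter_is_iovec(i);	// only user-backed iterators
 *	}
 */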
/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
        unsigned long seg;
        size_t ret = 0;

        for (seg = 0; seg < nr_segs; seg++)
                ret += iov[seg].iov_len;
        return ret;
}
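
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * per the note above, each segment length is validated first so the sum
 * cannot wrap a size_t; bounding each step by MAX_RW_COUNT is one common
 * scheme:
 *
 *	size_t so_far = 0;
 *	unsigned long seg;
 *
 *	for (seg = 0; seg < nr_segs; seg++) {
 *		if (iov[seg].iov_len > MAX_RW_COUNT - so_far)
 *			return -EINVAL;
 *		so_far += iov[seg].iov_len;
 *	}
 *	// now iov_length(iov, nr_segs) == so_far, with no overflow
 */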
static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
        return (struct iovec) {
                .iov_base = iter->iov->iov_base + iter->iov_offset,
                .iov_len = min(iter->count,
                               iter->iov->iov_len - iter->iov_offset),
        };
}
size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
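
/*
 * Illustrative sketch (not part of this header): walking a user-backed
 * iterator one segment at a time with iov_iter_iovec() and
 * iov_iter_advance(); example_process() is a hypothetical consumer:
 *
 *	while (iov_iter_count(&iter)) {
 *		struct iovec v = iov_iter_iovec(&iter);
 *		ssize_t done = example_process(v.iov_base, v.iov_len);
 *
 *		if (done <= 0)
 *			break;
 *		iov_iter_advance(&iter, done);
 *	}
 */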
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                           struct iov_iter *i);
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, true)))
                return 0;
        else
                return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return false;
        else
                return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return false;
        else
                return _copy_from_iter_full_nocache(addr, bytes, i);
}
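
/*
 * Illustrative sketch (hypothetical ->read_iter-style handler, not part
 * of this header): copy_to_iter() returns the number of bytes actually
 * copied, which may be short, so callers check the return value rather
 * than assuming success:
 *
 *	static ssize_t example_read_iter(struct kiocb *iocb,
 *					 struct iov_iter *to)
 *	{
 *		char buf[] = "hello";
 *		size_t copied = copy_to_iter(buf, sizeof(buf), to);
 *
 *		return copied ? copied : -EFAULT;
 *	}
 */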
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_mc_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, true)))
                return 0;
        else
                return _copy_mc_to_iter(addr, bytes, i);
}
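
/*
 * Illustrative sketch (hypothetical pmem-style caller, not part of this
 * header): because copy_from_iter_flushcache() silently falls back to the
 * nocache variant when the architecture lacks
 * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE, a caller that needs the
 * flush-on-return guarantee checks for it explicitly, as the note above
 * requires:
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -EOPNOTSUPP;	// or flush the destination by hand
 *	copied = copy_from_iter_flushcache(dst, bytes, iter);
 */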
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
                   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
                   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
                   unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
                   size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
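
/*
 * Illustrative sketch (not part of this header): building a kernel-space
 * iterator over two buffers with iov_iter_kvec(). READ is the direction
 * for an iterator that acts as the destination of the transfer; hdr/body
 * and their lengths are hypothetical:
 *
 *	struct kvec vec[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len },
 *		{ .iov_base = body, .iov_len = body_len },
 *	};
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, vec, 2, hdr_len + body_len);
 */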
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
                           size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
                                 size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
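
/*
 * Illustrative sketch (not part of this header): pinning the pages behind
 * the next chunk of an iterator. iov_iter_get_pages() returns how many
 * bytes the returned pages cover and stores the offset of the data within
 * the first page in *start:
 *
 *	struct page *pages[16];
 *	size_t start;
 *	ssize_t bytes;
 *
 *	bytes = iov_iter_get_pages(iter, pages, SIZE_MAX, 16, &start);
 *	if (bytes < 0)
 *		return bytes;
 *	// the data begins at offset 'start' into pages[0]; the caller
 *	// drops the page references with put_page() when done
 */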
static inline size_t iov_iter_count(const struct iov_iter *i)
{
        return i->count;
}
/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it. Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
        /*
         * count doesn't have to fit in size_t - comparison extends both
         * operands to u64 here and any value that would be truncated by
         * conversion in assignment is by definition greater than all
         * values of size_t, including old i->count.
         */
        if (i->count > count)
                i->count = count;
}
/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
        i->count = count;
}
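
/*
 * Illustrative sketch (not part of this header): the usual
 * truncate/reexpand pairing around an operation that may only consume a
 * bounded prefix of the iterator; example_do_io() is hypothetical and
 * consumes 'done' bytes:
 *
 *	size_t old = iov_iter_count(iter);
 *	size_t done;
 *
 *	iov_iter_truncate(iter, limit);
 *	done = example_do_io(iter);
 *	iov_iter_reexpand(iter, old - done);
 */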
struct csum_state {
        __wsum csum;
        size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
                             struct iov_iter *i);
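
/*
 * Illustrative sketch (not part of this header): csum_and_copy_to_iter()
 * threads an internet checksum through the copy; the opaque csstate
 * argument points at a struct csum_state carrying the running sum and the
 * byte offset within the overall message. The zero seed is just for
 * illustration:
 *
 *	struct csum_state cs = { .csum = 0, .off = 0 };
 *	size_t copied = csum_and_copy_to_iter(buf, len, &cs, iter);
 *	// cs.csum now folds in the bytes that were copied
 */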
struct iovec *iovec_from_user(const struct iovec __user *uvector,
                              unsigned long nr_segs, unsigned long fast_segs,
                              struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
                     unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
                     struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
                       unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
                       struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
                        struct iovec *iov, struct iov_iter *i);
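
/*
 * Illustrative sketch (hypothetical syscall-style path, not part of this
 * header): the canonical import_iovec() pattern starts from a small
 * on-stack array for the common case and unconditionally kfree()s
 * whatever *iovp ends up pointing at (NULL when the fast array was used);
 * example_do_read() is a hypothetical consumer:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = example_do_read(&iter);
 *	kfree(iov);
 *	return ret;
 */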
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
                            int (*f)(struct kvec *vec, void *context),
                            void *context);

#endif