/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux host-side vring helpers; for when the kernel needs to access
 * someone else's vring.
 *
 * Copyright IBM Corporation, 2013.
 * Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc.
 *
 * Written by: Rusty Russell <rusty@rustcorp.com.au>
 */
#ifndef _LINUX_VRINGH_H
#define _LINUX_VRINGH_H
#include <uapi/linux/virtio_ring.h>
#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/dma-direction.h>
#include <linux/vhost_iotlb.h>
#endif
#include <asm/barrier.h>

/* virtio_ring with information needed for host access. */
struct vringh {
	/* Everything is little endian */
	bool little_endian;

	/* Guest publishes used event idx (note: we always do). */
	bool event_indices;

	/* Can we get away with weak barriers? */
	bool weak_barriers;

	/* Last available index we saw (ie. where we're up to). */
	u16 last_avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* How many descriptors we've completed since last need_notify(). */
	u32 completed;

	/* The vring (note: it may contain user pointers!) */
	struct vring vring;

	/* IOTLB for this vring */
	struct vhost_iotlb *iotlb;

	/* The function to call to notify the guest about added buffers */
	void (*notify)(struct vringh *);
};
/**
 * struct vringh_config_ops - ops for creating a host vring from a virtio driver
 * @find_vrhs: find the host vrings and instantiate them
 *	vdev: the virtio_device
 *	nhvrs: the number of host vrings to find
 *	hvrs: on success, includes new host vrings
 *	callbacks: array of driver callbacks, for each host vring
 *		include a NULL entry for vqs that do not need a callback
 *	Returns 0 on success or error status
 * @del_vrhs: free the host vrings found by find_vrhs().
 */
struct virtio_device;
typedef void vrh_callback_t(struct virtio_device *, struct vringh *);

struct vringh_config_ops {
	int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs,
			 struct vringh *vrhs[], vrh_callback_t *callbacks[]);
	void (*del_vrhs)(struct virtio_device *vdev);
};

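/*
 * Illustrative sketch only: "ops" is assumed to point at the transport's
 * struct vringh_config_ops, and "my_rx_done" is a hypothetical driver
 * callback; neither is part of this API.  It shows a driver asking for two
 * host vrings, with a NULL callback entry for the vring that needs none.
 *
 *	static void my_rx_done(struct virtio_device *vdev, struct vringh *vrh);
 *
 *	static vrh_callback_t *callbacks[] = { my_rx_done, NULL };
 *	struct vringh *vrhs[2];
 *	int err;
 *
 *	err = ops->find_vrhs(vdev, 2, vrhs, callbacks);
 *	...
 *	ops->del_vrhs(vdev);
 */
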
/* The memory the vring can access, and what offset to apply. */
struct vringh_range {
	u64 start, end_incl;
	u64 offset;
};

/**
 * struct vringh_iov - iovec mangler.
 *
 * Mangles iovec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_iov {
	struct iovec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};
/**
 * struct vringh_kiov - kvec mangler.
 *
 * Mangles kvec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_kiov {
	struct kvec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};
/* Flag on max_num to indicate we're kmalloced. */
#define VRINGH_IOV_ALLOCATED 0x8000000

/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used);

static inline void vringh_iov_init(struct vringh_iov *iov,
				   struct iovec *iovec, unsigned num)
{
	iov->used = iov->i = 0;
	iov->consumed = 0;
	iov->max_num = num;
	iov->iov = iovec;
}

static inline void vringh_iov_reset(struct vringh_iov *iov)
{
	iov->iov[iov->i].iov_len += iov->consumed;
	iov->iov[iov->i].iov_base -= iov->consumed;
	iov->consumed = 0;
	iov->i = 0;
}

static inline void vringh_iov_cleanup(struct vringh_iov *iov)
{
	if (iov->max_num & VRINGH_IOV_ALLOCATED)
		kfree(iov->iov);
	iov->max_num = iov->used = iov->i = iov->consumed = 0;
	iov->iov = NULL;
}
/* Convert a descriptor into iovecs. */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head);

/* Copy bytes from readable vsg, consuming it (and incrementing riov->i). */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len);

/* Copy bytes into writable vsg, consuming it (and incrementing wiov->i). */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len);

/* Mark a descriptor as used. */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len);
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used);

/* Pretend we've never seen descriptor (for easy error handling). */
void vringh_abandon_user(struct vringh *vrh, unsigned int num);

/* Do we need to fire the eventfd to notify the other side? */
int vringh_need_notify_user(struct vringh *vrh);

bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);

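/*
 * Typical flow with the *_user helpers (illustrative sketch only; error
 * handling is trimmed, and "vrh", "getrange", "buf" and the iovec counts
 * are assumptions of the example, not part of this API).  A return of 1
 * from vringh_getdesc_user() is taken to mean a descriptor was found; the
 * used length of 0 reflects that this example only reads from the buffer.
 *
 *	struct vringh_iov riov, wiov;
 *	struct iovec rvec[8], wvec[8];
 *	u16 head;
 *	int err;
 *
 *	vringh_iov_init(&riov, rvec, ARRAY_SIZE(rvec));
 *	vringh_iov_init(&wiov, wvec, ARRAY_SIZE(wvec));
 *
 *	err = vringh_getdesc_user(vrh, &riov, &wiov, getrange, &head);
 *	if (err == 1) {
 *		ssize_t got;
 *
 *		got = vringh_iov_pull_user(&riov, buf, sizeof(buf));
 *		...
 *		vringh_complete_user(vrh, head, 0);
 *		if (vringh_need_notify_user(vrh) > 0)
 *			... fire the eventfd ...
 *	}
 *	vringh_iov_cleanup(&riov);
 *	vringh_iov_cleanup(&wiov);
 */
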
/* Helpers for kernelspace vrings. */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used);

static inline void vringh_kiov_init(struct vringh_kiov *kiov,
				    struct kvec *kvec, unsigned num)
{
	kiov->used = kiov->i = 0;
	kiov->consumed = 0;
	kiov->max_num = num;
	kiov->iov = kvec;
}

static inline void vringh_kiov_reset(struct vringh_kiov *kiov)
{
	kiov->iov[kiov->i].iov_len += kiov->consumed;
	kiov->iov[kiov->i].iov_base -= kiov->consumed;
	kiov->consumed = 0;
	kiov->i = 0;
}

static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
{
	if (kiov->max_num & VRINGH_IOV_ALLOCATED)
		kfree(kiov->iov);
	kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0;
	kiov->iov = NULL;
}

int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp);

ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len);
void vringh_abandon_kern(struct vringh *vrh, unsigned int num);
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_kern(struct vringh *vrh);
void vringh_notify_disable_kern(struct vringh *vrh);

int vringh_need_notify_kern(struct vringh *vrh);

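/*
 * Typical flow with the *_kern helpers (illustrative sketch only; "vrh",
 * "buf" and the kvec counts are assumptions of the example).  It mirrors
 * the userspace flow above, but descriptors point into kernel memory and
 * the caller can notify directly through vringh_notify() below.
 *
 *	struct vringh_kiov riov, wiov;
 *	struct kvec rvec[8], wvec[8];
 *	u16 head;
 *
 *	vringh_kiov_init(&riov, rvec, ARRAY_SIZE(rvec));
 *	vringh_kiov_init(&wiov, wvec, ARRAY_SIZE(wvec));
 *
 *	if (vringh_getdesc_kern(vrh, &riov, &wiov, &head, GFP_KERNEL) == 1) {
 *		vringh_iov_pull_kern(&riov, buf, sizeof(buf));
 *		...
 *		vringh_complete_kern(vrh, head, 0);
 *		if (vringh_need_notify_kern(vrh) > 0)
 *			vringh_notify(vrh);
 *	}
 *	vringh_kiov_cleanup(&riov);
 *	vringh_kiov_cleanup(&wiov);
 */
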
/* Notify the guest about buffers added to the used ring */
static inline void vringh_notify(struct vringh *vrh)
{
	if (vrh->notify)
		vrh->notify(vrh);
}

static inline bool vringh_is_little_endian(const struct vringh *vrh)
{
	return vrh->little_endian ||
		virtio_legacy_is_little_endian();
}

static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
{
	return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
{
	return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
}

static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
{
	return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
{
	return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
}

static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
{
	return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
{
	return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
}

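/*
 * Example (illustrative sketch; "desc" is assumed to point at a struct
 * vring_desc belonging to the ring): descriptor fields are stored in
 * virtio byte order, so read them through these accessors rather than
 * dereferencing them directly.
 *
 *	u64 addr  = vringh64_to_cpu(vrh, desc->addr);
 *	u32 len   = vringh32_to_cpu(vrh, desc->len);
 *	u16 flags = vringh16_to_cpu(vrh, desc->flags);
 */
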
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb);

int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used);

int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp);

ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len);
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len);

void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num);

int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_iotlb(struct vringh *vrh);
void vringh_notify_disable_iotlb(struct vringh *vrh);

int vringh_need_notify_iotlb(struct vringh *vrh);

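/*
 * Illustrative sketch only ("iotlb", "features", "num", "desc", "avail"
 * and "used" are assumptions of the example): the *_iotlb variants follow
 * the same flow as the *_kern helpers above, but every ring address is
 * translated through the vhost IOTLB attached with vringh_set_iotlb().
 *
 *	vringh_init_iotlb(vrh, features, num, false, desc, avail, used);
 *	vringh_set_iotlb(vrh, iotlb);
 *	...
 *	vringh_getdesc_iotlb(vrh, &riov, &wiov, &head, GFP_ATOMIC);
 */
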
#endif /* CONFIG_VHOST_IOTLB */

#endif /* _LINUX_VRINGH_H */