virtio_ring.h
/* SPDX-License-Identifier: BSD-3-Clause */
/*
 * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
 * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
 *
 * From Linux kernel include/uapi/linux/virtio_ring.h
 */

#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H

#include <virtio_types.h>

/* This marks a buffer as continuing via the next field */
#define VRING_DESC_F_NEXT		1
/* This marks a buffer as write-only (otherwise read-only) */
#define VRING_DESC_F_WRITE		2
/* This means the buffer contains a list of buffer descriptors */
#define VRING_DESC_F_INDIRECT		4

/*
 * The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer. It's unreliable, so it's simply an optimization. Guest
 * will still kick if it's out of buffers.
 */
#define VRING_USED_F_NO_NOTIFY		1

/*
 * The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer. It's unreliable, so it's simply an optimization.
 */
#define VRING_AVAIL_F_NO_INTERRUPT	1

/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC	28

/*
 * The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field.
 *
 * The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field.
 */
#define VIRTIO_RING_F_EVENT_IDX		29
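/*
 * Illustrative sketch (not part of the original header): when
 * VIRTIO_RING_F_EVENT_IDX has not been negotiated, a driver can suppress
 * used-buffer interrupts by setting VRING_AVAIL_F_NO_INTERRUPT in
 * avail->flags, keeping a shadow copy as struct virtqueue does. The use of
 * the cpu_to_virtio16() helper from virtio.h is an assumption here.
 *
 *	vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
 *	vq->vring.avail->flags = cpu_to_virtio16(vq->vdev,
 *						 vq->avail_flags_shadow);
 */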
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
	/* Address (guest-physical) */
	__virtio64 addr;
	/* Length */
	__virtio32 len;
	/* The flags as indicated above */
	__virtio16 flags;
	/* We chain unused descriptors via this, too */
	__virtio16 next;
};
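/*
 * Illustrative sketch (not part of the original header): a two-descriptor
 * chain, with a device-readable request header followed by a device-writable
 * data buffer. Descriptor 0 sets VRING_DESC_F_NEXT and points at descriptor 1,
 * which carries VRING_DESC_F_WRITE. Endianness conversion is omitted for
 * brevity, and the buffer names are assumptions.
 *
 *	desc[0].addr  = header_phys;
 *	desc[0].len   = sizeof(struct req_header);
 *	desc[0].flags = VRING_DESC_F_NEXT;
 *	desc[0].next  = 1;
 *
 *	desc[1].addr  = data_phys;
 *	desc[1].len   = data_len;
 *	desc[1].flags = VRING_DESC_F_WRITE;
 *	desc[1].next  = 0;
 */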
struct vring_avail {
	__virtio16 flags;
	__virtio16 idx;
	__virtio16 ring[];
};

struct vring_used_elem {
	/* Index of start of used descriptor chain */
	__virtio32 id;
	/* Total length of the descriptor chain which was used (written to) */
	__virtio32 len;
};

struct vring_used {
	__virtio16 flags;
	__virtio16 idx;
	struct vring_used_elem ring[];
};

struct vring {
	unsigned int num;
	struct vring_desc *desc;
	struct vring_avail *avail;
	struct vring_used *used;
};

/**
 * virtqueue - a queue to register buffers for sending or receiving.
 *
 * @list: the chain of virtqueues for this device
 * @vdev: the virtio device this queue was created for
 * @index: the zero-based ordinal number for this queue
 * @num_free: number of elements we expect to be able to fit
 * @vring: actual memory layout for this queue
 * @event: host publishes avail event idx
 * @free_head: head of free buffer list
 * @num_added: number we've added since last sync
 * @last_used_idx: last used index we've seen
 * @avail_flags_shadow: last written value to avail->flags
 * @avail_idx_shadow: last written value to avail->idx in guest byte order
 */
struct virtqueue {
	struct list_head list;
	struct udevice *vdev;
	unsigned int index;
	unsigned int num_free;
	struct vring vring;
	bool event;
	unsigned int free_head;
	unsigned int num_added;
	u16 last_used_idx;
	u16 avail_flags_shadow;
	u16 avail_idx_shadow;
};

/*
 * Alignment requirements for vring elements.
 * When using pre-virtio 1.0 layout, these fall out naturally.
 */
#define VRING_AVAIL_ALIGN_SIZE		2
#define VRING_USED_ALIGN_SIZE		4
#define VRING_DESC_ALIGN_SIZE		16
/*
 * We publish the used event index at the end of the available ring,
 * and vice versa. They are at the end for backwards compatibility.
 */
#define vring_used_event(vr)	((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr)	(*(__virtio16 *)&(vr)->used->ring[(vr)->num])
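/*
 * Illustrative sketch (not part of the original header): with
 * VIRTIO_RING_F_EVENT_IDX negotiated, the driver tells the device which used
 * index should trigger the next interrupt by writing the slot that
 * vring_used_event() names, effectively "interrupt me once you have consumed
 * up to last_used_idx". Endianness conversion is omitted for brevity.
 *
 *	vring_used_event(&vq->vring) = vq->last_used_idx;
 */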
static inline void vring_init(struct vring *vr, unsigned int num, void *p,
			      unsigned long align)
{
	vr->num = num;
	vr->desc = p;
	vr->avail = p + num * sizeof(struct vring_desc);
	vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] +
		   sizeof(__virtio16) + align - 1) & ~(align - 1));
}

static inline unsigned int vring_size(unsigned int num, unsigned long align)
{
	return ((sizeof(struct vring_desc) * num +
		sizeof(__virtio16) * (3 + num) + align - 1) & ~(align - 1)) +
		sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
}
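/*
 * Illustrative sketch (not part of the original header): carving a ring out
 * of a single aligned allocation. vring_size() returns the number of bytes
 * needed for num entries at the given alignment, and vring_init() then lays
 * the descriptor table, available ring and used ring out inside that buffer.
 * The use of memalign() and the chosen parameters are assumptions.
 *
 *	unsigned int num = 256;
 *	size_t bytes = vring_size(num, 4096);
 *	void *queue = memalign(4096, bytes);
 *	struct vring vr;
 *
 *	memset(queue, 0, bytes);
 *	vring_init(&vr, num, queue, 4096);
 */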
/*
 * The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have just
 * incremented index from old to new_idx, should we trigger an event?
 */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	/*
	 * Note: Xen has similar logic for notification hold-off
	 * in include/xen/interface/io/ring.h with req_event and req_prod
	 * corresponding to event_idx + 1 and new_idx respectively.
	 * Note also that req_event and req_prod in Xen start at 1,
	 * event indexes in virtio start at 0.
	 */
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
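/*
 * Illustrative sketch (not part of the original header): a worked example of
 * the comparison above. If the other side asked to be notified at
 * event_idx = 5 and we have just moved the index from old = 4 to new_idx = 6,
 * then (u16)(6 - 5 - 1) = 0 < (u16)(6 - 4) = 2, so the requested index was
 * crossed and an event is needed. Had event_idx been 10, the left-hand side
 * would wrap to a large value and no event would be sent.
 *
 *	vring_need_event(5, 6, 4);	returns non-zero (notify)
 *	vring_need_event(10, 6, 4);	returns 0 (hold off)
 */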
struct virtio_sg;

/**
 * virtqueue_add - expose buffers to other end
 *
 * @vq: the struct virtqueue we're talking about
 * @sgs: array of terminated scatterlists
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable
 *	(after readable ones)
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
		  unsigned int out_sgs, unsigned int in_sgs);
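/*
 * Illustrative sketch (not part of the original header): queueing one
 * device-readable and one device-writable buffer, then kicking the device.
 * The addr/length layout of struct virtio_sg from virtio.h and the request
 * and response variable names are assumptions.
 *
 *	struct virtio_sg hdr_sg  = { .addr = &req,  .length = sizeof(req) };
 *	struct virtio_sg resp_sg = { .addr = &resp, .length = sizeof(resp) };
 *	struct virtio_sg *sgs[]  = { &hdr_sg, &resp_sg };
 *	int ret;
 *
 *	ret = virtqueue_add(vq, sgs, 1, 1);
 *	if (!ret)
 *		virtqueue_kick(vq);
 */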
/**
 * virtqueue_kick - update after add_buf
 *
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add() calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq);

/**
 * virtqueue_get_buf - get the next used buffer
 *
 * @vq: the struct virtqueue we're talking about
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written. This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the memory buffer
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
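/*
 * Illustrative sketch (not part of the original header): a simple synchronous
 * completion loop, busy-waiting until the device marks the buffer as used.
 * Timeout handling is omitted and the variable names are assumptions.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	do {
 *		buf = virtqueue_get_buf(vq, &len);
 *	} while (!buf);
 *
 * On return, len holds how many bytes the device wrote into the buffer.
 */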
/**
 * vring_create_virtqueue - create a virtqueue for a virtio device
 *
 * @index: the index of the queue
 * @num: number of elements of the queue
 * @vring_align: the alignment requirement of the descriptor ring
 * @udev: the virtio transport udevice
 * @return: the virtqueue pointer or NULL if failed
 *
 * This creates a virtqueue and allocates the descriptor ring for a virtio
 * device. The caller should query virtqueue_get_vring_size() to learn the
 * actual size of the ring.
 *
 * This API is supposed to be called by the virtio transport driver in the
 * virtio find_vqs() uclass method.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
					 unsigned int vring_align,
					 struct udevice *udev);
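/*
 * Illustrative sketch (not part of the original header): a transport's
 * find_vqs() uclass method might create each requested queue roughly like
 * this and then program the ring addresses into the device. queue_max, the
 * alignment value and the error handling are assumptions.
 *
 *	struct virtqueue *vq;
 *
 *	vq = vring_create_virtqueue(index, queue_max, 4096, udev);
 *	if (!vq)
 *		return -ENOMEM;
 *
 * The matching del_vqs() method undoes this with vring_del_virtqueue(vq).
 */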
/**
 * vring_del_virtqueue - destroy a virtqueue
 *
 * @vq: the struct virtqueue we're talking about
 *
 * This destroys a virtqueue. If created with vring_create_virtqueue(),
 * this also frees the descriptor ring.
 *
 * This API is supposed to be called by the virtio transport driver in the
 * virtio del_vqs() uclass method.
 */
void vring_del_virtqueue(struct virtqueue *vq);

/**
 * virtqueue_get_vring_size - get the size of the virtqueue's vring
 *
 * @vq: the struct virtqueue containing the vring of interest
 * @return: the size of the vring in a virtqueue.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *vq);

/**
 * virtqueue_get_desc_addr - get the vring descriptor table address
 *
 * @vq: the struct virtqueue containing the vring of interest
 * @return: the descriptor table address of the vring in a virtqueue.
 */
ulong virtqueue_get_desc_addr(struct virtqueue *vq);

/**
 * virtqueue_get_avail_addr - get the vring available ring address
 *
 * @vq: the struct virtqueue containing the vring of interest
 * @return: the available ring address of the vring in a virtqueue.
 */
ulong virtqueue_get_avail_addr(struct virtqueue *vq);

/**
 * virtqueue_get_used_addr - get the vring used ring address
 *
 * @vq: the struct virtqueue containing the vring of interest
 * @return: the used ring address of the vring in a virtqueue.
 */
ulong virtqueue_get_used_addr(struct virtqueue *vq);
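/*
 * Illustrative sketch (not part of the original header): a transport driver
 * typically feeds these values to the device while setting up a queue, for
 * example via legacy virtio-mmio style registers. The register names and the
 * page-shift convention below are assumptions, not part of this header.
 *
 *	writel(virtqueue_get_vring_size(vq), base + QUEUE_NUM);
 *	writel(virtqueue_get_desc_addr(vq) >> 12, base + QUEUE_PFN);
 */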
/**
 * virtqueue_poll - query pending used buffers
 *
 * @vq: the struct virtqueue we're talking about
 * @last_used_idx: virtqueue last used index
 *
 * Returns "true" if there are pending used buffers in the queue.
 */
bool virtqueue_poll(struct virtqueue *vq, u16 last_used_idx);
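/*
 * Illustrative sketch (not part of the original header): virtqueue_poll() can
 * be used as a cheap check for completions before calling
 * virtqueue_get_buf(), e.g. in a driver's receive path. The surrounding
 * variables are assumptions.
 *
 *	if (virtqueue_poll(vq, vq->last_used_idx))
 *		buf = virtqueue_get_buf(vq, &len);
 */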
/**
 * virtqueue_dump - dump the virtqueue for debugging
 *
 * @vq: the struct virtqueue we're talking about
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 */
void virtqueue_dump(struct virtqueue *vq);
/*
 * Barriers in virtio are tricky. Since we are not in a hypervisor/guest
 * scenario, having these as nops is enough to work as expected.
 */
static inline void virtio_mb(void)
{
}

static inline void virtio_rmb(void)
{
}

static inline void virtio_wmb(void)
{
}

static inline void virtio_store_mb(__virtio16 *p, __virtio16 v)
{
	WRITE_ONCE(*p, v);
}
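/*
 * Illustrative sketch (not part of the original header): the canonical use of
 * these helpers is to order descriptor and available-ring writes before
 * publishing the new available index, mirroring what the virtqueue code has
 * to do. Endianness handling via cpu_to_virtio16() is an assumption about the
 * surrounding code.
 *
 *	virtio_wmb();
 *	vq->avail_idx_shadow++;
 *	vq->vring.avail->idx = cpu_to_virtio16(vq->vdev, vq->avail_idx_shadow);
 */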
#endif /* _LINUX_VIRTIO_RING_H */