liburing.h
#ifndef LIB_URING_H
#define LIB_URING_H

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/uio.h>
#include <signal.h>
#include <string.h>
#include <endian.h>	/* for the __BYTE_ORDER check below */
#include "../../include/uapi/linux/io_uring.h"
#include <inttypes.h>
#include <linux/swab.h>
#include "barrier.h"

/*
 * Library interface to io_uring
 */
struct io_uring_sq {
	/* Pointers into the shared, kernel-mapped SQ ring */
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *kflags;
	unsigned *kdropped;
	unsigned *array;

	/* The mmap'ed array of submission queue entries */
	struct io_uring_sqe *sqes;

	/* Head and tail as tracked by the application */
	unsigned sqe_head;
	unsigned sqe_tail;

	size_t ring_sz;
};
struct io_uring_cq {
	/* Pointers into the shared, kernel-mapped CQ ring */
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *koverflow;

	/* The completion queue entries live inside the ring mapping */
	struct io_uring_cqe *cqes;

	size_t ring_sz;
};

struct io_uring {
	struct io_uring_sq sq;
	struct io_uring_cq cq;
	int ring_fd;
};
/*
 * System calls
 */
extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
extern int io_uring_enter(int fd, unsigned to_submit,
	unsigned min_complete, unsigned flags, sigset_t *sig);
extern int io_uring_register(int fd, unsigned int opcode, void *arg,
	unsigned int nr_args);
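
/*
 * A minimal sketch (not part of the original header) of driving the raw
 * system calls: io_uring_setup() returns a ring fd for the requested queue
 * depth, and io_uring_queue_mmap() below maps the rings it describes.
 * Error handling is elided and "8" is an arbitrary depth.
 *
 *	struct io_uring_params p = { };
 *	struct io_uring ring;
 *	int fd;
 *
 *	fd = io_uring_setup(8, &p);
 *	io_uring_queue_mmap(fd, &p, &ring);
 *
 * Most callers can skip this and use io_uring_queue_init(), which wraps
 * both steps.
 */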
/*
 * Library interface
 */
extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
	unsigned flags);
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
	struct io_uring *ring);
extern void io_uring_queue_exit(struct io_uring *ring);
extern int io_uring_peek_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_wait_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_submit(struct io_uring *ring);
extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
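
/*
 * The typical life cycle, as a sketch ("fd" and "buf" are assumed to
 * exist; error handling elided):
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *
 *	io_uring_queue_init(8, &ring, 0);
 *
 *	sqe = io_uring_get_sqe(&ring);		// grab a free sqe slot
 *	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 *	io_uring_submit(&ring);			// tell the kernel about it
 *
 *	io_uring_wait_cqe(&ring, &cqe);		// block until completion
 *	// cqe->res holds the readv result (bytes read or -errno)
 *	io_uring_cqe_seen(&ring, cqe);		// release the cqe slot
 *
 *	io_uring_queue_exit(&ring);
 */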
/*
 * Must be called after io_uring_{peek,wait}_cqe(), once the cqe has
 * been processed by the application.
 */
static inline void io_uring_cqe_seen(struct io_uring *ring,
				     struct io_uring_cqe *cqe)
{
	if (cqe) {
		struct io_uring_cq *cq = &ring->cq;

		(*cq->khead)++;
		/*
		 * Ensure that the kernel sees our new head, the kernel has
		 * the matching read barrier.
		 */
		write_barrier();
	}
}
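
/*
 * For non-blocking reaping, a sketch: in this version of the library,
 * io_uring_peek_cqe() can return success with *cqe_ptr left NULL when
 * nothing is pending, which is why io_uring_cqe_seen() tolerates a NULL
 * cqe ("handle_completion" is a hypothetical application handler):
 *
 *	struct io_uring_cqe *cqe;
 *
 *	if (io_uring_peek_cqe(&ring, &cqe) == 0 && cqe) {
 *		handle_completion(cqe);
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 */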
/*
 * Command prep helpers
 */
static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
	sqe->user_data = (uintptr_t) data;
}

static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
{
	return (void *) (uintptr_t) cqe->user_data;
}
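
/*
 * A sketch of correlating completions with requests: stash a request
 * pointer in the sqe and recover it from the matching cqe ("struct
 * my_request" is a hypothetical application type):
 *
 *	struct my_request *req = ...;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, req->fd, &req->iov, 1, req->offset);
 *	io_uring_sqe_set_data(sqe, req);
 *	...
 *	io_uring_wait_cqe(&ring, &cqe);
 *	req = io_uring_cqe_get_data(cqe);	// same pointer back
 *	io_uring_cqe_seen(&ring, cqe);
 */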
static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
				    const void *addr, unsigned len,
				    off_t offset)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = op;
	sqe->fd = fd;
	sqe->off = offset;
	sqe->addr = (unsigned long) addr;
	sqe->len = len;
}

static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
				       const struct iovec *iovecs,
				       unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
					    void *buf, unsigned nbytes,
					    off_t offset)
{
	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
}

static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
					const struct iovec *iovecs,
					unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
					     const void *buf, unsigned nbytes,
					     off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
}
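
/*
 * A sketch of a vectored write at file offset 4096 ("iov" and "fd"
 * assumed):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_writev(sqe, fd, &iov, 1, 4096);
 *	io_uring_submit(&ring);
 *
 * The *_fixed variants expect buffers previously registered with
 * io_uring_register(fd, IORING_REGISTER_BUFFERS, iovecs, nr); the caller
 * must fill sqe->buf_index with the registered buffer's index after the
 * prep helper has run.
 */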
static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
					  unsigned poll_mask)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
#if __BYTE_ORDER == __BIG_ENDIAN
	poll_mask = __swahw32(poll_mask);
#endif
	sqe->poll_events = poll_mask;
}

static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
					     void *user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_REMOVE;
	sqe->addr = (unsigned long) user_data;
}
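
/*
 * A sketch of a one-shot poll for readability (POLLIN from <poll.h>);
 * on completion, cqe->res carries the signaled event mask:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, sockfd, POLLIN);
 *	io_uring_sqe_set_data(sqe, tag);	// "tag": hypothetical cookie
 *	io_uring_submit(&ring);
 *
 * A still-pending poll can be canceled by matching its user_data:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_remove(sqe, tag);
 *	io_uring_submit(&ring);
 */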
static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
				       unsigned fsync_flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FSYNC;
	sqe->fd = fd;
	sqe->fsync_flags = fsync_flags;
}
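
/*
 * A sketch of an asynchronous fdatasync: IORING_FSYNC_DATASYNC comes from
 * the io_uring uapi header, and passing 0 gives full fsync() semantics:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *	io_uring_submit(&ring);
 */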
static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;
}
#ifdef __cplusplus
}
#endif

#endif