queue.c

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>

#include "liburing.h"
#include "barrier.h"

static int __io_uring_get_cqe(struct io_uring *ring,
			      struct io_uring_cqe **cqe_ptr, int wait)
{
	struct io_uring_cq *cq = &ring->cq;
	const unsigned mask = *cq->kring_mask;
	unsigned head;
	int ret;

	*cqe_ptr = NULL;
	head = *cq->khead;
	do {
		/*
		 * It's necessary to use a read_barrier() before reading
		 * the CQ tail, since the kernel updates it locklessly. The
		 * kernel has the matching store barrier for the update. The
		 * kernel also ensures that previous stores to CQEs are ordered
		 * with the tail update.
		 */
		read_barrier();
		if (head != *cq->ktail) {
			*cqe_ptr = &cq->cqes[head & mask];
			break;
		}
		if (!wait)
			break;
		ret = io_uring_enter(ring->ring_fd, 0, 1,
				     IORING_ENTER_GETEVENTS, NULL);
		if (ret < 0)
			return -errno;
	} while (1);

	return 0;
}
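
/*
 * Note: the cqe handed back above points into the shared CQ ring. Once the
 * application has consumed it, it must advance the CQ head (*cq->khead)
 * itself so the kernel can reuse that slot; nothing in this file advances
 * the head on the caller's behalf.
 */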

/*
 * Return an IO completion, if one is readily available. Returns 0 with
 * cqe_ptr filled in on success, -errno on failure.
 */
int io_uring_peek_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
{
	return __io_uring_get_cqe(ring, cqe_ptr, 0);
}

/*
 * Return an IO completion, waiting for it if necessary. Returns 0 with
 * cqe_ptr filled in on success, -errno on failure.
 */
int io_uring_wait_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
{
	return __io_uring_get_cqe(ring, cqe_ptr, 1);
}
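
/*
 * Illustrative sketch, not part of liburing: wait for a single completion
 * with io_uring_wait_cqe(), read its result, then advance the CQ head by
 * hand so the kernel can reuse the slot (later liburing releases wrap this
 * head update in a helper). The function name and return convention are
 * made up for the example.
 */
static int example_reap_one(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;

	/* res holds the request's result, negative errno style on failure */
	ret = cqe->res;

	/* Mark the CQE as consumed: bump the head, then let the kernel see it */
	(*ring->cq.khead)++;
	write_barrier();

	return ret;
}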

/*
 * Submit sqes acquired from io_uring_get_sqe() to the kernel.
 *
 * Returns number of sqes submitted
 */
int io_uring_submit(struct io_uring *ring)
{
	struct io_uring_sq *sq = &ring->sq;
	const unsigned mask = *sq->kring_mask;
	unsigned ktail, ktail_next, submitted, to_submit;
	int ret;

	/*
	 * If we have pending IO in the kring, submit it first. We need a
	 * read barrier here to match the kernel's store barrier when updating
	 * the SQ head.
	 */
	read_barrier();
	if (*sq->khead != *sq->ktail) {
		submitted = *sq->kring_entries;
		goto submit;
	}

	if (sq->sqe_head == sq->sqe_tail)
		return 0;

	/*
	 * Fill in sqes that we have queued up, adding them to the kernel ring
	 */
	submitted = 0;
	ktail = ktail_next = *sq->ktail;
	to_submit = sq->sqe_tail - sq->sqe_head;
	while (to_submit--) {
		ktail_next++;
		read_barrier();

		/* The array entry maps this ring slot to the sqe's index */
		sq->array[ktail & mask] = sq->sqe_head & mask;
		ktail = ktail_next;

		sq->sqe_head++;
		submitted++;
	}

	if (!submitted)
		return 0;

	if (*sq->ktail != ktail) {
		/*
		 * First write barrier ensures that the SQE stores are updated
		 * with the tail update. This is needed so that the kernel
		 * will never see a tail update without the preceding SQE
		 * stores being done.
		 */
		write_barrier();
		*sq->ktail = ktail;
		/*
		 * The kernel has the matching read barrier for reading the
		 * SQ tail.
		 */
		write_barrier();
	}

submit:
	ret = io_uring_enter(ring->ring_fd, submitted, 0,
			     IORING_ENTER_GETEVENTS, NULL);
	if (ret < 0)
		return -errno;

	return ret;
}
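
/*
 * Illustrative sketch, not part of liburing: queue a small batch of no-op
 * requests with io_uring_get_sqe() and hand them all to the kernel with a
 * single io_uring_submit() call. Assumes the ring was set up elsewhere
 * (e.g. with io_uring_queue_init()); the function name and batch size are
 * made up for the example.
 */
static int example_submit_nops(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;
	int i, queued = 0;

	for (i = 0; i < 4; i++) {
		sqe = io_uring_get_sqe(ring);
		if (!sqe)
			break;			/* SQ ring is full */
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_NOP;	/* completes without doing IO */
		sqe->user_data = i;		/* tag to match completions later */
		queued++;
	}
	if (!queued)
		return 0;

	/* Returns the number of sqes submitted, or -errno on failure */
	return io_uring_submit(ring);
}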

/*
 * Return an sqe to fill. Application must later call io_uring_submit()
 * when it's ready to tell the kernel about it. The caller may call this
 * function multiple times before calling io_uring_submit().
 *
 * Returns a vacant sqe, or NULL if we're full.
 */
struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
{
	struct io_uring_sq *sq = &ring->sq;
	unsigned next = sq->sqe_tail + 1;
	struct io_uring_sqe *sqe;

	/*
	 * All sqes are used
	 */
	if (next - sq->sqe_head > *sq->kring_entries)
		return NULL;

	sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];
	sq->sqe_tail = next;
	return sqe;
}
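
/*
 * Illustrative sketch, not part of liburing: io_uring_get_sqe() returns
 * NULL once all sqes are in use, so a common pattern is to flush the
 * queued entries with io_uring_submit() and then try again. The function
 * name is made up for the example.
 */
static struct io_uring_sqe *example_get_sqe_or_flush(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	if (!sqe) {
		/* Ring is full: push the queued sqes to the kernel, then retry */
		if (io_uring_submit(ring) < 0)
			return NULL;
		sqe = io_uring_get_sqe(ring);
	}
	return sqe;
}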