
// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions manipulate sctp stream queue/scheduling.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/list.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

/* Round-robin handling
 * RFC DRAFT ndata section 3.2
 */

static void sctp_sched_rr_unsched_all(struct sctp_stream *stream);
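
/* Advance the round-robin cursor to the stream after the current one,
 * skipping the list head so the walk wraps around rr_list.
 */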
static void sctp_sched_rr_next_stream(struct sctp_stream *stream)
{
        struct list_head *pos;

        pos = stream->rr_next->rr_list.next;
        if (pos == &stream->rr_list)
                pos = pos->next;

        stream->rr_next = list_entry(pos, struct sctp_stream_out_ext, rr_list);
}
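
/* Take a stream off the round-robin list. If it is the current cursor,
 * advance the cursor first; if nothing else is scheduled, clear it.
 */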
static void sctp_sched_rr_unsched(struct sctp_stream *stream,
                                  struct sctp_stream_out_ext *soute)
{
        if (stream->rr_next == soute)
                /* Try to move to the next stream */
                sctp_sched_rr_next_stream(stream);

        list_del_init(&soute->rr_list);

        /* If we have no other stream queued, clear next */
        if (list_empty(&stream->rr_list))
                stream->rr_next = NULL;
}
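
/* Put a stream on the tail of the round-robin list unless it is already
 * there; the first stream scheduled becomes the initial cursor.
 */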
static void sctp_sched_rr_sched(struct sctp_stream *stream,
                                struct sctp_stream_out_ext *soute)
{
        if (!list_empty(&soute->rr_list))
                /* Already scheduled. */
                return;

        /* Schedule the stream */
        list_add_tail(&soute->rr_list, &stream->rr_list);

        if (!stream->rr_next)
                stream->rr_next = soute;
}
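
/* Round-robin has no per-stream parameter, so there is nothing to set. */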
static int sctp_sched_rr_set(struct sctp_stream *stream, __u16 sid,
                             __u16 prio, gfp_t gfp)
{
        return 0;
}
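
/* Likewise, there is no per-stream value to report. */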
static int sctp_sched_rr_get(struct sctp_stream *stream, __u16 sid,
                             __u16 *value)
{
        return 0;
}
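
/* Start with an empty round-robin list and no current stream. */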
static int sctp_sched_rr_init(struct sctp_stream *stream)
{
        INIT_LIST_HEAD(&stream->rr_list);
        stream->rr_next = NULL;

        return 0;
}
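
/* Initialize the per-stream list node so the stream can later be linked
 * into rr_list.
 */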
static int sctp_sched_rr_init_sid(struct sctp_stream *stream, __u16 sid,
                                  gfp_t gfp)
{
        INIT_LIST_HEAD(&SCTP_SO(stream, sid)->ext->rr_list);

        return 0;
}
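
/* The scheduler is going away: take every stream off the round-robin list. */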
static void sctp_sched_rr_free(struct sctp_stream *stream)
{
        sctp_sched_rr_unsched_all(stream);
}
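
/* A message was added to the outqueue: make sure the stream carrying it
 * is on the round-robin list. The stream id is taken from the message's
 * first chunk.
 */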
static void sctp_sched_rr_enqueue(struct sctp_outq *q,
                                  struct sctp_datamsg *msg)
{
        struct sctp_stream *stream;
        struct sctp_chunk *ch;
        __u16 sid;

        ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list);
        sid = sctp_chunk_stream_no(ch);
        stream = &q->asoc->stream;
        sctp_sched_rr_sched(stream, SCTP_SO(stream, sid)->ext);
}
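
/* Pick the next chunk to send: keep draining a partially sent message
 * (out_curr) if there is one, otherwise serve the stream at the
 * round-robin cursor.
 */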
static struct sctp_chunk *sctp_sched_rr_dequeue(struct sctp_outq *q)
{
        struct sctp_stream *stream = &q->asoc->stream;
        struct sctp_stream_out_ext *soute;
        struct sctp_chunk *ch = NULL;

        /* Bail out quickly if queue is empty */
        if (list_empty(&q->out_chunk_list))
                goto out;

        /* Find which chunk is next */
        if (stream->out_curr)
                soute = stream->out_curr->ext;
        else
                soute = stream->rr_next;
        ch = list_entry(soute->outq.next, struct sctp_chunk, stream_list);

        sctp_sched_dequeue_common(q, ch);

out:
        return ch;
}
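
/* The last chunk of a message was just dequeued: rotate the cursor to
 * the next stream and unschedule this one if it has nothing left queued.
 */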
static void sctp_sched_rr_dequeue_done(struct sctp_outq *q,
                                       struct sctp_chunk *ch)
{
        struct sctp_stream_out_ext *soute;
        __u16 sid;

        /* Last chunk on that msg, move to the next stream */
        sid = sctp_chunk_stream_no(ch);
        soute = SCTP_SO(&q->asoc->stream, sid)->ext;

        sctp_sched_rr_next_stream(&q->asoc->stream);

        if (list_empty(&soute->outq))
                sctp_sched_rr_unsched(&q->asoc->stream, soute);
}
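
/* Walk the association's outqueue and put every stream that still has
 * chunks waiting back on the round-robin list.
 */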
static void sctp_sched_rr_sched_all(struct sctp_stream *stream)
{
        struct sctp_association *asoc;
        struct sctp_stream_out_ext *soute;
        struct sctp_chunk *ch;

        asoc = container_of(stream, struct sctp_association, stream);
        list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
                __u16 sid;

                sid = sctp_chunk_stream_no(ch);
                soute = SCTP_SO(stream, sid)->ext;
                if (soute)
                        sctp_sched_rr_sched(stream, soute);
        }
}
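
/* Drop every stream from the round-robin list, e.g. when the association
 * switches to another scheduler.
 */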
static void sctp_sched_rr_unsched_all(struct sctp_stream *stream)
{
        struct sctp_stream_out_ext *soute, *tmp;

        list_for_each_entry_safe(soute, tmp, &stream->rr_list, rr_list)
                sctp_sched_rr_unsched(stream, soute);
}
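
/* Callback table handed to the stream scheduler core; registered below
 * under SCTP_SS_RR.
 */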
static struct sctp_sched_ops sctp_sched_rr = {
        .set = sctp_sched_rr_set,
        .get = sctp_sched_rr_get,
        .init = sctp_sched_rr_init,
        .init_sid = sctp_sched_rr_init_sid,
        .free = sctp_sched_rr_free,
        .enqueue = sctp_sched_rr_enqueue,
        .dequeue = sctp_sched_rr_dequeue,
        .dequeue_done = sctp_sched_rr_dequeue_done,
        .sched_all = sctp_sched_rr_sched_all,
        .unsched_all = sctp_sched_rr_unsched_all,
};
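
/* Register the round-robin scheduler with the stream scheduler core. */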
void sctp_sched_ops_rr_init(void)
{
        sctp_sched_ops_register(SCTP_SS_RR, &sctp_sched_rr);
}
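
/* Usage note (not part of the upstream file): an application would select
 * this scheduler per association through the SCTP_STREAM_SCHEDULER socket
 * option from the RFC 8260 sockets extension. A minimal, hedged sketch,
 * assuming the headers in use expose SCTP_STREAM_SCHEDULER, SCTP_SS_RR and
 * struct sctp_assoc_value; illustrative only:
 *
 *      struct sctp_assoc_value av = {
 *              .assoc_id    = assoc_id,        // association to configure
 *              .assoc_value = SCTP_SS_RR,      // pick the round-robin scheduler
 *      };
 *      if (setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
 *                     &av, sizeof(av)) < 0)
 *              perror("setsockopt(SCTP_STREAM_SCHEDULER)");
 */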