tcp_rate.c

// SPDX-License-Identifier: GPL-2.0-only
#include <net/tcp.h>

/* The bandwidth estimator estimates the rate at which the network
 * can currently deliver outbound data packets for this flow. At a high
 * level, it operates by taking a delivery rate sample for each ACK.
 *
 * A rate sample records the rate at which the network delivered packets
 * for this flow, calculated over the time interval between the transmission
 * of a data packet and the acknowledgment of that packet.
 *
 * Specifically, over the interval between each transmit and corresponding ACK,
 * the estimator generates a delivery rate sample. Typically it uses the rate
 * at which packets were acknowledged. However, the approach of using only the
 * acknowledgment rate faces a challenge under the prevalent ACK decimation or
 * compression: packets can temporarily appear to be delivered much quicker
 * than the bottleneck rate. Since it is physically impossible to do that in a
 * sustained fashion, when the estimator notices that the ACK rate is faster
 * than the transmit rate, it uses the latter:
 *
 *    send_rate = #pkts_delivered/(last_snd_time - first_snd_time)
 *    ack_rate  = #pkts_delivered/(last_ack_time - first_ack_time)
 *    bw = min(send_rate, ack_rate)
 *
 * Notice the estimator essentially estimates the goodput, not always the
 * network bottleneck link rate when the sending or receiving is limited by
 * other factors like applications or receiver window limits. The estimator
 * deliberately avoids using the inter-packet spacing approach because that
 * approach requires a large number of samples and sophisticated filtering.
 *
 * TCP flows can often be application-limited in request/response workloads.
 * The estimator marks a bandwidth sample as application-limited if there
 * was some moment during the sampled window of packets when there was no data
 * ready to send in the write queue.
 */
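
/* A minimal, standalone sketch of the min(send_rate, ack_rate) rule above.
 * This block is illustrative only and excluded from the build; the struct
 * and function names (bw_demo_inputs, bw_demo_estimate) are hypothetical.
 * Note that taking the min of the two rates is equivalent to dividing the
 * delivered count by the max of the two intervals, which is the form
 * tcp_rate_gen() below actually uses.
 */
#ifdef TCP_RATE_DOC_EXAMPLE
struct bw_demo_inputs {
	u64 pkts_delivered;
	u64 first_snd_time, last_snd_time;	/* us */
	u64 first_ack_time, last_ack_time;	/* us */
};

/* Returns the estimated bw in packets per second, or 0 if not measurable. */
static u64 bw_demo_estimate(const struct bw_demo_inputs *in)
{
	u64 snd_us = in->last_snd_time - in->first_snd_time;
	u64 ack_us = in->last_ack_time - in->first_ack_time;
	u64 send_rate, ack_rate;

	if (!snd_us || !ack_us)
		return 0;
	send_rate = in->pkts_delivered * USEC_PER_SEC / snd_us;
	ack_rate = in->pkts_delivered * USEC_PER_SEC / ack_us;
	return min(send_rate, ack_rate);
}
#endif /* TCP_RATE_DOC_EXAMPLE */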
/* Snapshot the current delivery information in the skb, to generate
 * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
 */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* In general we need to start delivery rate samples from the
	 * time we received the most recent ACK, to ensure we include
	 * the full time the network needs to deliver all in-flight
	 * packets. If there are no packets in flight yet, then we
	 * know that any ACKs after now indicate that the network was
	 * able to deliver those packets completely in the sampling
	 * interval between now and the next ACK.
	 *
	 * Note that we use packets_out instead of tcp_packets_in_flight(tp)
	 * because the latter is a guess based on RTO and loss-marking
	 * heuristics. We don't want spurious RTOs or loss markings to cause
	 * a spuriously small time interval, causing a spuriously high
	 * bandwidth estimate.
	 */
	if (!tp->packets_out) {
		u64 tstamp_us = tcp_skb_timestamp_us(skb);

		tp->first_tx_mstamp = tstamp_us;
		tp->delivered_mstamp = tstamp_us;
	}

	TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
	TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
	TCP_SKB_CB(skb)->tx.delivered = tp->delivered;
	TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;
}
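
/* Worked timeline for the snapshot logic above (illustrative only; the
 * timestamps and counts are made up):
 *
 *   t=0us  : flow is idle (packets_out == 0), skb A is sent
 *            -> first_tx_mstamp = delivered_mstamp = 0; A snapshots both.
 *   t=100us: skb B is sent while A is still in flight (packets_out > 0)
 *            -> the window start is NOT reset; B snapshots
 *               first_tx_mstamp = 0 plus the delivered count at t=100us.
 *   later  : when B is (s)acked, the sample interval reaches back to the
 *            window start, covering the full time the network needed to
 *            deliver everything that was in flight.
 */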
/* When an skb is sacked or acked, we fill in the rate sample with the (prior)
 * delivery information when the skb was last transmitted.
 *
 * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
 * called multiple times. We favor the information from the most recently
 * sent skb, i.e., the skb with the highest prior_delivered count.
 */
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

	if (!scb->tx.delivered_mstamp)
		return;

	if (!rs->prior_delivered ||
	    after(scb->tx.delivered, rs->prior_delivered)) {
		rs->prior_delivered = scb->tx.delivered;
		rs->prior_mstamp = scb->tx.delivered_mstamp;
		rs->is_app_limited = scb->tx.is_app_limited;
		rs->is_retrans = scb->sacked & TCPCB_RETRANS;

		/* Record send time of most recently ACKed packet: */
		tp->first_tx_mstamp = tcp_skb_timestamp_us(skb);
		/* Find the duration of the "send phase" of this window: */
		rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
						     scb->tx.first_tx_mstamp);
	}
	/* Mark off the skb delivered once it's sacked to avoid being
	 * used again when it's cumulatively acked. For acked packets
	 * we don't need to reset since it'll be freed soon.
	 */
	if (scb->sacked & TCPCB_SACKED_ACKED)
		scb->tx.delivered_mstamp = 0;
}
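
/* Sketch (excluded from the build) of how repeated calls for one stretched
 * ACK converge on the most recently sent skb. Three skbs, sent at
 * 10/20/30us, carried prior delivered counts 5/6/7, and all are (s)acked
 * by a single ACK. Names and values are hypothetical.
 */
#ifdef TCP_RATE_DOC_EXAMPLE
static void rate_demo_stretched_ack(void)
{
	struct { u32 delivered; u64 sent_us; } snaps[] = {
		{ 5, 10 }, { 6, 20 }, { 7, 30 },
	};
	u32 prior_delivered = 0;
	u64 prior_sent_us = 0;
	int i;

	for (i = 0; i < 3; i++) {
		/* Mirrors the after(scb->tx.delivered, ...) test above. */
		if (!prior_delivered || snaps[i].delivered > prior_delivered) {
			prior_delivered = snaps[i].delivered;
			prior_sent_us = snaps[i].sent_us;
		}
	}
	/* Result: prior_delivered == 7 and prior_sent_us == 30, i.e. the
	 * rate sample is anchored to the most recently sent skb.
	 */
}
#endif /* TCP_RATE_DOC_EXAMPLE */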
/* Update the connection delivery information and generate a rate sample. */
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  bool is_sack_reneg, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 snd_us, ack_us;

	/* Clear app limited if bubble is acked and gone. */
	if (tp->app_limited && after(tp->delivered, tp->app_limited))
		tp->app_limited = 0;

	/* TODO: there are multiple places throughout tcp_ack() to get
	 * current time. Refactor the code using a new "tcp_acktag_state"
	 * to carry current time, flags, stats like "tcp_sacktag_state".
	 */
	if (delivered)
		tp->delivered_mstamp = tp->tcp_mstamp;

	rs->acked_sacked = delivered;	/* freshly ACKed or SACKed */
	rs->losses = lost;		/* freshly marked lost */
	/* Return an invalid sample if no timing information is available or
	 * in recovery from loss with SACK reneging. Rate samples taken during
	 * a SACK reneging event may overestimate bw by including packets that
	 * were SACKed before the reneg.
	 */
	if (!rs->prior_mstamp || is_sack_reneg) {
		rs->delivered = -1;
		rs->interval_us = -1;
		return;
	}
	rs->delivered = tp->delivered - rs->prior_delivered;

	/* Model sending data and receiving ACKs as separate pipeline phases
	 * for a window. Usually the ACK phase is longer, but with ACK
	 * compression the send phase can be longer. To be safe we use the
	 * longer phase.
	 */
	snd_us = rs->interval_us;	/* send phase */
	ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
				    rs->prior_mstamp); /* ack phase */
	rs->interval_us = max(snd_us, ack_us);

	/* Record both segment send and ack receive intervals */
	rs->snd_interval_us = snd_us;
	rs->rcv_interval_us = ack_us;
	/* Normally we expect interval_us >= min-rtt.
	 * Note that the rate may still be over-estimated when a spuriously
	 * retransmitted skb was first (s)acked, because "interval_us"
	 * is under-estimated (by up to an RTT). However, continuously
	 * measuring the delivery rate during loss recovery is crucial
	 * for connections that suffer heavy or prolonged losses.
	 */
	if (unlikely(rs->interval_us < tcp_min_rtt(tp))) {
		if (!rs->is_retrans)
			pr_debug("tcp rate: %ld %d %u %u %u\n",
				 rs->interval_us, rs->delivered,
				 inet_csk(sk)->icsk_ca_state,
				 tp->rx_opt.sack_ok, tcp_min_rtt(tp));
		rs->interval_us = -1;
		return;
	}

	/* Record the last non-app-limited or the highest app-limited bw */
	if (!rs->is_app_limited ||
	    ((u64)rs->delivered * tp->rate_interval_us >=
	     (u64)tp->rate_delivered * rs->interval_us)) {
		tp->rate_delivered = rs->delivered;
		tp->rate_interval_us = rs->interval_us;
		tp->rate_app_limited = rs->is_app_limited;
	}
}
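
/* Sketch (excluded from the build) of how a consumer, e.g. a congestion
 * control module, might turn a finished rate sample into a bandwidth
 * figure, and why the app-limited comparison above cross-multiplies
 * instead of dividing. Function names are hypothetical.
 */
#ifdef TCP_RATE_DOC_EXAMPLE
static u64 rate_demo_bw_pps(const struct rate_sample *rs)
{
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return 0;	/* invalid sample */
	return (u64)rs->delivered * USEC_PER_SEC / rs->interval_us;
}

/* The test "d1/i1 >= d0/i0" is evaluated as "d1*i0 >= d0*i1" to avoid
 * integer division and the rounding error it introduces.
 */
static bool rate_demo_faster(u32 d1, u32 i1, u32 d0, u32 i0)
{
	return (u64)d1 * i0 >= (u64)d0 * i1;
}
#endif /* TCP_RATE_DOC_EXAMPLE */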
/* If a gap is detected between sends, mark the socket application-limited. */
void tcp_rate_check_app_limited(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (/* We have less than one packet to send. */
	    tp->write_seq - tp->snd_nxt < tp->mss_cache &&
	    /* Nothing in sending host's qdisc queues or NIC tx queue. */
	    sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) &&
	    /* We are not limited by CWND. */
	    tcp_packets_in_flight(tp) < tp->snd_cwnd &&
	    /* All lost packets have been retransmitted. */
	    tp->lost_out <= tp->retrans_out)
		tp->app_limited =
			(tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
}
EXPORT_SYMBOL_GPL(tcp_rate_check_app_limited);
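
/* Worked example of the app-limited lifecycle (illustrative; values are
 * made up):
 *
 * 1. The app runs dry with tp->delivered == 100 and 10 packets in flight,
 *    so the check above sets tp->app_limited = 110. The GNU "?: 1" keeps
 *    the marker nonzero even on a brand-new flow, since 0 means "not
 *    app-limited".
 * 2. Every skb sent while tp->app_limited != 0 snapshots
 *    tx.is_app_limited = 1 in tcp_rate_skb_sent(), so the rate samples it
 *    later produces carry rs->is_app_limited and can be discounted by
 *    consumers.
 * 3. Once tcp_rate_gen() sees tp->delivered pass 110, the app-limited
 *    "bubble" has been fully acked and tp->app_limited is reset to 0.
 */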