
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

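/**
 * tcp_clamp_rto_to_user_timeout() - clamp the RTO to the remaining user timeout
 * @sk: The socket whose retransmission timer is being armed.
 *
 * When TCP_USER_TIMEOUT is set, cap the next RTO so the retransmit timer
 * fires within the budget left since retrans_stamp; once that budget is
 * exhausted, return 1 jiffy so the timeout fires ASAP.  For example, with a
 * user timeout of 5000 ms of which 3000 ms have already elapsed, an RTO of
 * 4 s is clamped to 2 s.
 */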
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 elapsed, start_ts;
	s32 remaining;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (!icsk->icsk_user_timeout)
		return icsk->icsk_rto;

	elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
	remaining = icsk->icsk_user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}

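/**
 * tcp_clamp_probe0_to_user_timeout() - clamp the probe0 interval likewise
 * @sk: The socket being zero-window probed.
 * @when: The proposed probe interval, in jiffies.
 *
 * Shorten @when so the next zero-window probe fires within whatever is left
 * of the TCP_USER_TIMEOUT budget (never below TCP_TIMEOUT_MIN); @when is
 * returned unchanged when no user timeout is set.
 */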
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 remaining;
	s32 elapsed;

	if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
		return when;

	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
	if (unlikely(elapsed < 0))
		elapsed = 0;
	remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

	return min_t(u32, remaining, when);
}

/**
 * tcp_write_err() - close socket and save error info
 * @sk: The socket the error has appeared on.
 *
 * Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_write_queue_purge(sk);
	tcp_done(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 * tcp_out_of_resources() - Close socket if out of resources
 * @sk:       pointer to current socket
 * @do_reset: send a last packet with reset flag
 *
 * Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * Also close if our net namespace is exiting; in that case there is no
 * hope of ever communicating again since all netns interfaces are already
 * down (or about to be down), and we need to release our dst references,
 * which have been moved to the netns loopback interface, so the namespace
 * can finish exiting.  This condition is only possible if we are a kernel
 * socket, as those do not hold references to the namespace.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. The number of orphaned sockets exceeds an administratively configured
 *    limit.
 * 2. We are under strong memory pressure.
 * 3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If the peer does not open its window for a long time, or did not
	 * transmit anything for a long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 * tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 * @sk:    Pointer to the current socket.
 * @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if the socket sent something recently, select a safe
	 * number of retries.  8 corresponds to >100 seconds with a minimal
	 * RTO of 200 msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

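/* PMTU black hole detection: after repeated retransmission timeouts, enable
 * MTU probing and halve the current MSS estimate, clamped by the
 * tcp_base_mss, tcp_mtu_probe_floor and tcp_min_snd_mss sysctls, so that
 * smaller segments can get through a path that silently drops large packets.
 */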
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!net->ipv4.sysctl_tcp_mtu_probing)
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
		mss = max(mss, net->ipv4.sysctl_tcp_mtu_probe_floor);
		mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

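/* Model the total time consumed by @boundary retransmissions: exponential
 * doubling from @rto_base until the backoff reaches TCP_RTO_MAX, then linear
 * TCP_RTO_MAX steps.  Worked example: with rto_base = TCP_RTO_MIN (200 ms)
 * and TCP_RTO_MAX = 120 s, linear_backoff_thresh = ilog2(600) = 9, so
 * boundary = 8 gives ((2 << 8) - 1) * 200 ms ~= 102 s -- the ">100 seconds"
 * cited in tcp_orphan_retries() above.
 */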
static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	return jiffies_to_msecs(timeout);
}

/**
 * retransmits_timed_out() - returns true if this connection has timed out
 * @sk:       The current socket
 * @boundary: max number of retransmissions
 * @timeout:  A custom timeout value.  If set to 0, the default timeout is
 *            calculated from TCP_RTO_MIN and the number of unsuccessful
 *            retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	unsigned int start_ts;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

	return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			__dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		expired = icsk->icsk_retransmits >= retry_until;
	} else {
		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			__dst_negative_advice(sk);
		}

		retry_until = net->ipv4.sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						icsk->icsk_user_timeout);
	tcp_fastopen_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	if (sk_rethink_txhash(sk)) {
		tp->timeout_rehash++;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
	}

	return 0;
}

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tcp_sk(sk));
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}

/**
 * tcp_delack_timer() - The TCP delayed ACK timeout handler
 * @t: Pointer to the timer. (cast to a struct sock *)
 *
 * This function gets (indirectly) called when the kernel timer for a TCP packet
 * of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 * Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

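/* Zero-window probe timer: fires when the peer advertises a zero window and
 * we have queued data but nothing in flight.  Either sends another probe via
 * tcp_send_probe0() or aborts the connection once the probe count exceeds
 * its limit or the TCP_USER_TIMEOUT / orphan budgets run out.
 */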
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !skb) {
		icsk->icsk_probes_out = 0;
		icsk->icsk_probes_tstamp = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement a similar policy when
	 * we use RTO to probe the window in tcp_retransmit_timer().
	 */
	if (!icsk->icsk_probes_tstamp)
		icsk->icsk_probes_tstamp = tcp_jiffies32;
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
		 msecs_to_jiffies(icsk->icsk_user_timeout))
		goto abort;

	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct tcp_sock *tp = tcp_sk(sk);

	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	icsk->icsk_retransmits++;
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp(tp);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}

/**
 * tcp_retransmit_timer() - The TCP retransmit timeout handler
 * @sk: Pointer to the current socket.
 *
 * This function gets called when the kernel timer for a TCP packet
 * of this socket expires.
 *
 * It handles retransmission, timer adjustment and other necessary measures.
 *
 * Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);

		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	icsk->icsk_retransmits++;
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion,
		 * Let senders fight for local resources conservatively.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  TCP_RESOURCE_PROBE_INTERVAL,
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
		__sk_dst_reset(sk);

out:;
}

/* Called with bottom-half processing disabled.
 * Called by tcp_write_timer().
 */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

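/* Shared timer callback behind the retransmit, probe0, loss-probe and RACK
 * reordering timers.  If the socket is locked by the user, defer the work to
 * tcp_release_cb() via the TCP_WRITE_TIMER_DEFERRED flag.
 */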
static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

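/* Called on each SYN-ACK retransmission timeout; only accounts the event in
 * the request's netns TCPTIMEOUTS MIB counter.
 */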
void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

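/* Arm or disarm the keepalive timer when the SO_KEEPALIVE socket option
 * changes (tracked via the SOCK_KEEPOPEN flag); a no-op for sockets in
 * CLOSE or LISTEN state.
 */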
void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);

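/* Keepalive timer.  Also doubles as the FIN_WAIT2 (linger2) timer for
 * orphaned sockets.  Once the connection has been idle for
 * keepalive_time_when() it sends probes, and it aborts the connection after
 * too many unanswered probes or once TCP_USER_TIMEOUT expires.
 */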
static void tcp_keepalive_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

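/* hrtimer callback that flushes a pending compressed (coalesced) ACK; runs
 * in softirq context, so when the socket is owned by the user it defers to
 * tcp_release_cb() just like the delayed-ACK timer.
 */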
static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we have to send one ack finally,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_send_ack(sk);
		}
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);

	return HRTIMER_NORESTART;
}

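/* Wire up all TCP transmit-path timers for a socket: the classic
 * retransmit, delayed-ACK and keepalive timers via
 * inet_csk_init_xmit_timers(), plus the pacing and compressed-ACK hrtimers.
 */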
void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED_SOFT);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED_SOFT);
	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}