/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;

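/* Deliver a fatal timeout error to the user: report any pending soft
 * error (e.g. one noted from ICMP), falling back to ETIMEDOUT, then
 * tear the connection down.
 */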
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. The number of orphaned sockets exceeds an administratively configured
 *    limit.
 * 2. We are under strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If the peer does not open its window for a long time, or did not
	 * transmit anything for a long time, penalize it. */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when the connection requires reset.
		 * 1. Last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /* 2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}

/* Calculate the maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if the socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with the minimal
	 * RTO of 200 msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

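/* Drive PMTU black-hole detection from the retransmit timer: the first
 * timeout enables MTU probing; subsequent timeouts halve the MSS derived
 * from the current search floor (capped by the tcp_base_mss sysctl and
 * floored at what a 68-byte IPv4 MTU allows) and resync the socket's MSS.
 */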
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	struct net *net = sock_net(sk);

	/* Black hole detection */
	if (net->ipv4.sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}

/* This function calculates a "timeout" which is equivalent to the timeout
 * of a TCP connection after "boundary" unsuccessful, exponentially
 * backed-off retransmissions with an initial RTO of TCP_RTO_MIN, or of
 * TCP_TIMEOUT_INIT if the syn_set flag is set.  For example, with an
 * rto_base of 200 msec and boundary = 8, the computed timeout is
 * (2^9 - 1) * 200 msec, a little over 102 seconds.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (unlikely(!start_ts))
		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
			if (tp->syn_fastopen || tp->syn_data)
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
			if (tp->syn_data && icsk->icsk_retransmits == 1)
				NET_INC_STATS_BH(sock_net(sk),
						 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
		}
		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
			/* Some middle-boxes may black-hole Fast Open _after_
			 * the handshake. Therefore we conservatively disable
			 * Fast Open on this path on recurring timeouts with
			 * few or zero bytes acked after Fast Open.
			 */
			if (tp->syn_data_acked &&
			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
				if (icsk->icsk_retransmits == sysctl_tcp_retries1)
					NET_INC_STATS_BH(sock_net(sk),
							 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
			}
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}

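/* Delayed-ACK timer work, run from the timer itself or deferred to
 * tcp_release_cb() when the socket was locked by the user: drain any
 * prequeued segments, then send the overdue ACK and adjust the quick-ack
 * timeout (ato) estimate.
 */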
void tcp_delack_timer_handler(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}

static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

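/* Zero-window probe timer: fires when the peer's receive window has been
 * closed and our window probes have gone unanswered.
 */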
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;
	u32 start_ts;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement a similar policy when
	 * we use RTO to probe the window in tcp_retransmit_timer().
	 */
	start_ts = tcp_skb_timestamp(tcp_send_head(sk));
	if (!start_ts)
		skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
		goto abort;

	max_probes = sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

/*
 * Timer for a Fast Open socket to retransmit its SYNACK. Note that the
 * sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike a regular SYN-ACK retransmit, we ignore the error
	 * returned from rtx_syn_ack() to make it more persistent, like a
	 * regular retransmit, because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}

/*
 * The TCP retransmit timer.
 */

void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive the ACK to our SYN-ACK, don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}
	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not back off.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If the stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick;
	 * the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c, which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}

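/* Demultiplex the ICSK write timer: a single timer slot services the
 * retransmit, zero-window probe, early-retransmit and tail-loss-probe
 * events.
 */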
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_EARLY_RETRANS:
		tcp_resume_early_retransmit(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

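/* Called when a SYNACK retransmit timer expires on a request socket;
 * TCP just accounts it as a timeout.
 */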
void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

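/* Arm or disarm the keepalive timer as SO_KEEPOPEN is toggled; sockets in
 * CLOSE or LISTEN state have no keepalive timer to manage.
 */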
void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}

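/* The keepalive timer is shared: besides sending keepalive probes it also
 * implements the FIN_WAIT2 timeout for orphaned sockets and honours
 * TCP_USER_TIMEOUT when counting unanswered probes.
 */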
static void tcp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if the socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If the keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

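/* Hook TCP's write, delayed-ACK and keepalive handlers into the generic
 * inet_connection_sock timer infrastructure.
 */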
void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}