/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

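/* Set via the net.ipv4.tcp_thin_linear_timeouts sysctl.  When non-zero,
 * connections classified as "thin" by tcp_stream_is_thin() keep a linear
 * (non-backed-off) retransmission timeout for the first
 * TCP_THIN_LINEAR_RETRIES attempts; see tcp_retransmit_timer() below.
 */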
int sysctl_tcp_thin_linear_timeouts __read_mostly;

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */

static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is a direct violation of the TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on an orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  The criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. We are under strong memory pressure.
 *  3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

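/* Called from tcp_write_timeout() once repeated retransmissions look like a
 * PMTU black hole: enable MTU probing and lower the MSS search floor, keeping
 * it no larger than sysctl_tcp_base_mss and no smaller than
 * sysctl_tcp_min_snd_mss.
 */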
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	struct net *net = sock_net(sk);

	/* Black hole detection */
	if (net->ipv4.sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct net *net = sock_net(sk);
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}


/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0, the default timeout is calculated and used,
 *             based on TCP_RTO_MIN and the number of unsuccessful retransmits.
 *  @syn_set:  true if the SYN bit was set.
 *
 *  The default "timeout" value this function can calculate and use
 *  is equivalent to the timeout of a TCP connection
 *  after "boundary" unsuccessful, exponentially backed-off
 *  retransmissions with an initial RTO of TCP_RTO_MIN, or TCP_TIMEOUT_INIT if
 *  the syn_set flag is set.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (unlikely(!start_ts))
		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
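	/* Worked example with the constants in this tree: for data segments
	 * rto_base = TCP_RTO_MIN (200ms) and TCP_RTO_MAX = 120s, so
	 * linear_backoff_thresh = ilog2(600) = 9.  A boundary of 8 gives
	 * ((2 << 8) - 1) * 200ms ~= 102s (the ">100 seconds" quoted in
	 * tcp_orphan_retries()), while the default tcp_retries2 of 15 gives
	 * ((2 << 9) - 1) * 200ms + 6 * 120s ~= 924s.
	 */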
	return (tcp_time_stamp - start_ts) >= timeout;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
			if (tp->syn_fastopen || tp->syn_data)
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
			if (tp->syn_data && icsk->icsk_retransmits == 1)
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
		} else if (!tp->syn_data && !tp->syn_fastopen) {
			sk_rethink_txhash(sk);
		}
		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0, 0)) {
			/* Some middle-boxes may black-hole Fast Open _after_
			 * the handshake. Therefore we conservatively disable
			 * Fast Open on this path on recurring timeouts with
			 * few or zero bytes acked after Fast Open.
			 */
			if (tp->syn_data_acked &&
			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
				if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
			}
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		} else {
			sk_rethink_txhash(sk);
		}

		retry_until = net->ipv4.sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato      = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}

/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @data:  Pointer to the current socket. (gets cast to struct sock *)
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP packet
 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

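/* Zero-window probe (persist) timer: runs while the peer advertises a zero
 * window and we still have data queued (tcp_send_head() != NULL) but nothing
 * in flight.
 */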
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;
	u32 start_ts;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond probes. We support this by
	 * default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceeds the
	 * corresponding system limit. We also implement similar policy when
	 * we use RTO to probe window in tcp_retransmit_timer().
	 */
	start_ts = tcp_skb_timestamp(tcp_send_head(sk));
	if (!start_ts)
		skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
		goto abort;

	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	icsk->icsk_retransmits++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}

/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}
	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}

/* Called with bottom-half processing disabled.
 * Called by tcp_write_timer()
 */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

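	/* A single kernel timer (icsk_retransmit_timer) is multiplexed across
	 * the retransmit, zero-window probe, early-retransmit and loss-probe
	 * events; icsk_pending records which of them is currently armed.
	 */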
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_EARLY_RETRANS:
		tcp_resume_early_retransmit(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}

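/* Shared timer: besides sending keepalive probes, this also implements the
 * FIN_WAIT2 (tp->linger2) timeout for sockets that have already been
 * orphaned by their owner.
 */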
static void tcp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

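	/* With the stock defaults (tcp_keepalive_time = 7200s,
	 * tcp_keepalive_intvl = 75s, tcp_keepalive_probes = 9) an idle
	 * connection is first probed after two hours and reset roughly
	 * 11 minutes later if none of the probes is answered.
	 */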
	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}