/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

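/* Does the segment [seq, end_seq) overlap the receive window
 * [s_win, e_win)?  Zero-length segments and zero-sized windows are
 * handled by the edge-equality checks.
 */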
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

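/* Out-of-window ACKs from TIME-WAIT sockets are rate-limited.  If the
 * limiter lets this one through we return TCP_TW_ACK and the caller sends
 * the ACK and releases the tw bucket; otherwise we drop our reference here
 * and tell the caller to silently drop the skb.
 */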
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives a RST, it means that the other end
 *   has finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
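/* The return value tells the caller what to do with the segment:
 * TCP_TW_SUCCESS - nothing to transmit, the segment has been dealt with;
 * TCP_TW_ACK     - answer with an ACK (the tw reference is kept for the caller);
 * TCP_TW_RST     - answer with a RST;
 * TCP_TW_SYN     - acceptable new SYN, the caller may reopen the connection.
 */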
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All the segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates. The RFC check, that it carries a
	 * newer sequence number, works at rates <40Mbit/sec.
	 * However, if PAWS works, it is reliable AND, even more,
	 * we may relax the silly seq space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns
	 * out to be an old duplicate (i.e. we receive a RST in reply to
	 * our SYN-ACK), we must return the socket to time-wait state.
	 * That is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is ACKless SYN it may be both old duplicate
		 * and new good SYN with random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
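/* A minimal-state tcp_timewait_sock is allocated from the per-netns death
 * row and the fields needed for later segment processing (rcv_nxt, window,
 * timestamps, MD5 key) are copied out of the full socket, which is then
 * torn down via tcp_done().  The bucket is scheduled for at least 3.5*RTO
 * (the rto value below); sockets entering real TCP_TIME_WAIT state use the
 * full TCP_TIMEWAIT_LEN.
 */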
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		tw->tw_timeout = TCP_TIMEWAIT_LEN;
		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		inet_twsk_schedule(tw, timeo);
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

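/* Called when the timewait sock is finally freed.  The only TCP-specific
 * resource it may still hold is the MD5 key copied in tcp_time_wait(),
 * which is released via RCU.
 */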
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

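/* Compute the receive window parameters (rsk_rcv_wnd, rsk_window_clamp and
 * the window scale) that will be advertised in the SYN-ACK for this request
 * sock, bounded by the listener's receive buffer and, if present, a
 * BPF-supplied initial rwnd.
 */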
/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

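/* Carry the ECN capability negotiated during the handshake over to the
 * child socket.
 */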
static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

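/* Pick the congestion control module for the child socket: a module pinned
 * by the route's RTAX_CC_ALGO metric wins; otherwise keep the ops inherited
 * from the listener when they were set explicitly via setsockopt, or fall
 * back to the system default.
 */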
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

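/* SMC stays enabled on the child only if the listener asked for it
 * (syn_smc) and the peer agreed in its SYN (smc_ok); otherwise the flag
 * inherited from the listener is cleared.
 */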
static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
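/* The child is created by cloning the listener (inet_csk_clone_lock()) and
 * then overwriting the TCP state that must come from the request sock and
 * the handshake: sequence numbers, negotiated options, window parameters
 * and the RTT/congestion-control bookkeeping.
 */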
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);

		smc_check_reset_syn_req(oldtp, req, newtp);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
			newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
			newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		INIT_LIST_HEAD(&newtp->tsq_node);
		INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = tcp_jiffies32;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sock_net(sk)->ipv4.sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_req = NULL;
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp = 0;
		newtp->rack.advanced = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

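/* Return values:
 *   NULL  - the segment was consumed or dropped here (possibly after an
 *           ACK or RST was sent on behalf of the request sock),
 *   sk    - hand the segment back to the caller (the listener, or the
 *           child socket in the Fast Open case),
 *   other - the freshly created child socket returned by
 *           inet_csk_complete_hashdance().
 */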
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not required,
			 * it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; the only case it does not handle
	   is crossed SYNs.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare. The possibility is about
	   the same as us discovering intelligent life on another planet tomorrow.

	   But generally, we should (RFC lies!) accept ACK
	   from SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before the attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension and too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

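/* Returns the value of tcp_rcv_state_process() when the child could be
 * processed directly, or 0 when the segment had to be queued on the
 * child's backlog instead.
 */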
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	/* record NAPI ID of child */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);