/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		  linux/ipv4/tcp.c
 *		  linux/ipv4/tcp_input.c
 *		  linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

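/* Initial sequence number and timestamp-offset generation for incoming
 * connections: both are derived from the addresses and ports of the SYN
 * via a keyed hash (see secure_seq.c), so they are hard to predict
 * off-path yet stable for a given 4-tuple.
 */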
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}

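/* Called during connect() when the chosen 4-tuple collides with a
 * TIME_WAIT socket: returns 1 if the timewait bucket may be recycled
 * for the new connection (inheriting its timestamp state so PAWS keeps
 * old duplicates out), 0 if the tuple cannot be reused.
 */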
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;
		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
			     (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
			     (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's, only the timestamp cache is
	   held not per host, but per port pair, and the TW bucket is used
	   as the state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

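/* Invoked from the generic connect path before the actual connect, so
 * that a BPF_CGROUP_INET4_CONNECT program can inspect or rewrite the
 * destination address.
 */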
static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent the BPF program called below from accessing bytes that
	 * are out of the bounds specified by the user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the
	 * hash tables and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
						       inet->inet_daddr,
						       inet->inet_sport,
						       usin->sin_port);
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = tp->write_seq ^ jiffies;

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if the socket was owned by the
 * user at the time tcp_v4_err() was called to handle the ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to go wrong... Remember the soft error
	 * in case this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

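/* Apply an ICMP redirect to this socket's cached route, if the cached
 * dst entry is still valid.
 */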
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_rtx_queue_head(sk);
		BUG_ON(!skb);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * RFC 1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {	/* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

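/* Seed th->check with the pseudo-header checksum and record where the
 * final checksum belongs, so that checksum offload (CHECKSUM_PARTIAL)
 * can finish the job later.
 */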
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 * This routine will send an RST to the other tcp.
 *
 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 * for reset.
 * Answer: if a packet caused a RST, it is not for a socket
 * existing in our system; and if it is matched to a socket,
 * it is just a duplicate segment or a bug in the other side's TCP.
 * So we build the reply based only on the parameters that
 * arrived with the segment.
 * Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;
	struct sock *ctl_sk;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and the incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket.
		 * We do not lose security here:
		 * the incoming packet is checked with the md5 hash of the
		 * found key; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     tcp_v4_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;

	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When the socket is gone, all binding information is lost.
	 * Routing might fail in this case. No choice here: if we choose to
	 * force the input interface, we will misroute in case of an
	 * asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
	if (sk)
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;
	struct sock *ctl_sk;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
	if (sk)
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

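/* ACK a segment that hit a TIME_WAIT socket, echoing the sequence
 * numbers and timestamp state preserved in the timewait bucket.
 */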
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

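/* ACK on behalf of a request (mini) socket, i.e. a connection still in
 * SYN_RECV or a not-yet-accepted Fast Open child.
 */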
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
						 tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 * Send a SYN-ACK after having received a SYN.
 * This still operates on a request_sock only, not on a big
 * socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq_opt_deref(ireq));
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 * IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address. */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;

		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

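/* Like tcp_md5_do_lookup(), but demand an exact address and prefix
 * length match instead of best-prefix matching.
 */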
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

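/* Remove an MD5 key; freeing is deferred via RCU because lockless
 * readers may still be walking the list.
 */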
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

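/* Release every configured MD5 key; used at socket teardown, once no
 * new lookups can start.
 */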
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

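/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT) handler: copy the key
 * description from userspace and add, replace or delete the matching
 * key (a zero key length means delete).
 */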
Ivan Delalande8917a772017-06-15 18:07:07 -07001118static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1119 char __user *optval, int optlen)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001120{
1121 struct tcp_md5sig cmd;
1122 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
Ivan Delalande8917a772017-06-15 18:07:07 -07001123 u8 prefixlen = 32;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001124
1125 if (optlen < sizeof(cmd))
1126 return -EINVAL;
1127
Arnaldo Carvalho de Melo71742592006-11-17 10:57:30 -02001128 if (copy_from_user(&cmd, optval, sizeof(cmd)))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001129 return -EFAULT;
1130
1131 if (sin->sin_family != AF_INET)
1132 return -EINVAL;
1133
Ivan Delalande8917a772017-06-15 18:07:07 -07001134 if (optname == TCP_MD5SIG_EXT &&
1135 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1136 prefixlen = cmd.tcpm_prefixlen;
1137 if (prefixlen > 32)
1138 return -EINVAL;
1139 }
1140
Dmitry Popov64a124e2014-08-03 22:45:19 +04001141 if (!cmd.tcpm_keylen)
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001142 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
Ivan Delalande8917a772017-06-15 18:07:07 -07001143 AF_INET, prefixlen);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001144
1145 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1146 return -EINVAL;
1147
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001148 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
Ivan Delalande8917a772017-06-15 18:07:07 -07001149 AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001150 GFP_KERNEL);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001151}
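
/* Userspace usage sketch for the handler above (illustrative only;
 * error handling omitted, "fd" and "peer" are hypothetical):
 *
 *	struct tcp_md5sig md5 = {};
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr = peer;			/* peer IPv4 address */
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Setting tcpm_keylen to 0 deletes the key via tcp_md5_do_del(); with
 * TCP_MD5SIG_EXT and TCP_MD5SIG_FLAG_PREFIX set, tcpm_prefixlen scopes
 * the key to a subnet, as parsed above.
 */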
1152
Eric Dumazet19689e32016-06-27 18:51:53 +02001153static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1154 __be32 daddr, __be32 saddr,
1155 const struct tcphdr *th, int nbytes)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001156{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001157 struct tcp4_pseudohdr *bp;
Adam Langley49a72df2008-07-19 00:01:42 -07001158 struct scatterlist sg;
Eric Dumazet19689e32016-06-27 18:51:53 +02001159 struct tcphdr *_th;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001160
Eric Dumazet19689e32016-06-27 18:51:53 +02001161 bp = hp->scratch;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001162 bp->saddr = saddr;
1163 bp->daddr = daddr;
1164 bp->pad = 0;
YOSHIFUJI Hideaki076fb722008-04-17 12:48:12 +09001165 bp->protocol = IPPROTO_TCP;
Adam Langley49a72df2008-07-19 00:01:42 -07001166 bp->len = cpu_to_be16(nbytes);
David S. Millerc7da57a2007-10-26 00:41:21 -07001167
Eric Dumazet19689e32016-06-27 18:51:53 +02001168 _th = (struct tcphdr *)(bp + 1);
1169 memcpy(_th, th, sizeof(*th));
1170 _th->check = 0;
1171
1172 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1173 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1174 sizeof(*bp) + sizeof(*th));
Herbert Xucf80e0e2016-01-24 21:20:23 +08001175 return crypto_ahash_update(hp->md5_req);
Adam Langley49a72df2008-07-19 00:01:42 -07001176}
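
/* For reference, the digest built above covers, in this order:
 *
 *	1. struct tcp4_pseudohdr: saddr, daddr, pad = 0, IPPROTO_TCP, length
 *	2. the TCP header with its checksum field zeroed (_th->check = 0)
 *	3. (in the callers) the payload, via tcp_md5_hash_skb_data()
 *	4. the key itself, via tcp_md5_hash_key()
 *
 * which matches the RFC 2385 TCP MD5 signature layout.
 */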
1177
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001178static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001179 __be32 daddr, __be32 saddr, const struct tcphdr *th)
Adam Langley49a72df2008-07-19 00:01:42 -07001180{
1181 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001182 struct ahash_request *req;
Adam Langley49a72df2008-07-19 00:01:42 -07001183
1184 hp = tcp_get_md5sig_pool();
1185 if (!hp)
1186 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001187 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -07001188
Herbert Xucf80e0e2016-01-24 21:20:23 +08001189 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001190 goto clear_hash;
Eric Dumazet19689e32016-06-27 18:51:53 +02001191 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
Adam Langley49a72df2008-07-19 00:01:42 -07001192 goto clear_hash;
1193 if (tcp_md5_hash_key(hp, key))
1194 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001195 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1196 if (crypto_ahash_final(req))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001197 goto clear_hash;
1198
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001199 tcp_put_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001200 return 0;
Adam Langley49a72df2008-07-19 00:01:42 -07001201
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001202clear_hash:
1203 tcp_put_md5sig_pool();
1204clear_hash_noput:
1205 memset(md5_hash, 0, 16);
Adam Langley49a72df2008-07-19 00:01:42 -07001206 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001207}
1208
Eric Dumazet39f8e582015-03-24 15:58:55 -07001209int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1210 const struct sock *sk,
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001211 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001212{
Adam Langley49a72df2008-07-19 00:01:42 -07001213 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001214 struct ahash_request *req;
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001215 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001216 __be32 saddr, daddr;
1217
Eric Dumazet39f8e582015-03-24 15:58:55 -07001218 if (sk) { /* valid for establish/request sockets */
1219 saddr = sk->sk_rcv_saddr;
1220 daddr = sk->sk_daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001221 } else {
Adam Langley49a72df2008-07-19 00:01:42 -07001222 const struct iphdr *iph = ip_hdr(skb);
1223 saddr = iph->saddr;
1224 daddr = iph->daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001225 }
Adam Langley49a72df2008-07-19 00:01:42 -07001226
1227 hp = tcp_get_md5sig_pool();
1228 if (!hp)
1229 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001230 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -07001231
Herbert Xucf80e0e2016-01-24 21:20:23 +08001232 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001233 goto clear_hash;
1234
Eric Dumazet19689e32016-06-27 18:51:53 +02001235 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
Adam Langley49a72df2008-07-19 00:01:42 -07001236 goto clear_hash;
1237 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1238 goto clear_hash;
1239 if (tcp_md5_hash_key(hp, key))
1240 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001241 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1242 if (crypto_ahash_final(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001243 goto clear_hash;
1244
1245 tcp_put_md5sig_pool();
1246 return 0;
1247
1248clear_hash:
1249 tcp_put_md5sig_pool();
1250clear_hash_noput:
1251 memset(md5_hash, 0, 16);
1252 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001253}
Adam Langley49a72df2008-07-19 00:01:42 -07001254EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001255
Eric Dumazetba8e2752015-10-02 11:43:28 -07001256#endif
1257
Eric Dumazetff74e232015-03-24 15:58:54 -07001258/* Called with rcu_read_lock() */
Eric Dumazetba8e2752015-10-02 11:43:28 -07001259static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
Eric Dumazetff74e232015-03-24 15:58:54 -07001260 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001261{
Eric Dumazetba8e2752015-10-02 11:43:28 -07001262#ifdef CONFIG_TCP_MD5SIG
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001263 /*
1264 * This gets called for each TCP segment that arrives,
1265 * so we want to be efficient.
1266 * We have 3 drop cases:
1267 * o No MD5 hash and one expected.
1268 * o MD5 hash and we're not expecting one.
1269 * o MD5 hash and it's wrong.
1270 */
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001271 const __u8 *hash_location = NULL;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001272 struct tcp_md5sig_key *hash_expected;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001273 const struct iphdr *iph = ip_hdr(skb);
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001274 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001275 int genhash;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001276 unsigned char newhash[16];
1277
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001278 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1279 AF_INET);
YOSHIFUJI Hideaki7d5d5522008-04-17 12:29:53 +09001280 hash_location = tcp_parse_md5sig_option(th);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001281
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001282 /* We've parsed the options - do we have a hash? */
1283 if (!hash_expected && !hash_location)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001284 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001285
1286 if (hash_expected && !hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -07001287 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001288 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001289 }
1290
1291 if (!hash_expected && hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -07001292 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001293 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001294 }
1295
1296 /* Okay, we have both hash_expected and hash_location -
1297 * so we need to calculate the MD5 hash and compare.
1298 */
Adam Langley49a72df2008-07-19 00:01:42 -07001299 genhash = tcp_v4_md5_hash_skb(newhash,
1300 hash_expected,
Eric Dumazet39f8e582015-03-24 15:58:55 -07001301 NULL, skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001302
1303 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
Eric Dumazet72145a62016-08-24 09:01:23 -07001304 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
Joe Perchese87cc472012-05-13 21:56:26 +00001305 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1306 &iph->saddr, ntohs(th->source),
1307 &iph->daddr, ntohs(th->dest),
1308 genhash ? " tcp_v4_calc_md5_hash failed"
1309 : "");
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001310 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001311 }
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001312 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001313#endif
Eric Dumazetba8e2752015-10-02 11:43:28 -07001314 return false;
1315}
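
/* Decision table for the checks above (true = drop the segment):
 *
 *	expected key?	MD5 option?	result
 *	no		no		accept (false)
 *	yes		no		drop, count TCPMD5NOTFOUND
 *	no		yes		drop, count TCPMD5UNEXPECTED
 *	yes		yes		drop on mismatch, count TCPMD5FAILURE
 */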
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001316
Eric Dumazetb40cf182015-09-25 07:39:08 -07001317static void tcp_v4_init_req(struct request_sock *req,
1318 const struct sock *sk_listener,
Octavian Purdila16bea702014-06-25 17:09:53 +03001319 struct sk_buff *skb)
1320{
1321 struct inet_request_sock *ireq = inet_rsk(req);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001322 struct net *net = sock_net(sk_listener);
Octavian Purdila16bea702014-06-25 17:09:53 +03001323
Eric Dumazet08d2cc3b2015-03-18 14:05:38 -07001324 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1325 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001326 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
Octavian Purdila16bea702014-06-25 17:09:53 +03001327}
1328
Eric Dumazetf9646292015-09-29 07:42:50 -07001329static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1330 struct flowi *fl,
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -04001331 const struct request_sock *req)
Octavian Purdilad94e0412014-06-25 17:09:55 +03001332{
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -04001333 return inet_csk_route_req(sk, &fl->u.ip4, req);
Octavian Purdilad94e0412014-06-25 17:09:55 +03001334}
1335
Eric Dumazet72a3eff2006-11-16 02:30:37 -08001336struct request_sock_ops tcp_request_sock_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 .family = PF_INET,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001338 .obj_size = sizeof(struct tcp_request_sock),
Octavian Purdila5db92c92014-06-25 17:09:59 +03001339 .rtx_syn_ack = tcp_rtx_synack,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001340 .send_ack = tcp_v4_reqsk_send_ack,
1341 .destructor = tcp_v4_reqsk_destructor,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 .send_reset = tcp_v4_send_reset,
stephen hemminger688d1942014-08-29 23:32:05 -07001343 .syn_ack_timeout = tcp_syn_ack_timeout,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344};
1345
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001346static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
Octavian Purdila2aec4a22014-06-25 17:10:00 +03001347 .mss_clamp = TCP_MSS_DEFAULT,
Octavian Purdila16bea702014-06-25 17:09:53 +03001348#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetfd3a1542015-03-24 15:58:56 -07001349 .req_md5_lookup = tcp_v4_md5_lookup,
John Dykstrae3afe7b2009-07-16 05:04:51 +00001350 .calc_md5_hash = tcp_v4_md5_hash_skb,
Andrew Mortonb6332e62006-11-30 19:16:28 -08001351#endif
Octavian Purdila16bea702014-06-25 17:09:53 +03001352 .init_req = tcp_v4_init_req,
Octavian Purdilafb7b37a2014-06-25 17:09:54 +03001353#ifdef CONFIG_SYN_COOKIES
1354 .cookie_init_seq = cookie_v4_init_sequence,
1355#endif
Octavian Purdilad94e0412014-06-25 17:09:55 +03001356 .route_req = tcp_v4_route_req,
Eric Dumazet84b114b2017-05-05 06:56:54 -07001357 .init_seq = tcp_v4_init_seq,
1358 .init_ts_off = tcp_v4_init_ts_off,
Octavian Purdilad6274bd2014-06-25 17:09:58 +03001359 .send_synack = tcp_v4_send_synack,
Octavian Purdila16bea702014-06-25 17:09:53 +03001360};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001361
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1363{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 /* Never answer SYNs sent to broadcast or multicast */
Eric Dumazet511c3f92009-06-02 05:14:27 +00001365 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 goto drop;
1367
Octavian Purdila1fb6f152014-06-25 17:10:02 +03001368 return tcp_conn_request(&tcp_request_sock_ops,
1369 &tcp_request_sock_ipv4_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371drop:
Eric Dumazet9caad862016-04-01 08:52:20 -07001372 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 return 0;
1374}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001375EXPORT_SYMBOL(tcp_v4_conn_request);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376
1377
1378/*
1379 * The three-way handshake has completed - we got a valid ACK -
1380 * now create the new socket.
1381 */
Eric Dumazet0c271712015-09-29 07:42:48 -07001382struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001383 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001384 struct dst_entry *dst,
1385 struct request_sock *req_unhash,
1386 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387{
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001388 struct inet_request_sock *ireq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 struct inet_sock *newinet;
1390 struct tcp_sock *newtp;
1391 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001392#ifdef CONFIG_TCP_MD5SIG
1393 struct tcp_md5sig_key *key;
1394#endif
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001395 struct ip_options_rcu *inet_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396
1397 if (sk_acceptq_is_full(sk))
1398 goto exit_overflow;
1399
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 newsk = tcp_create_openreq_child(sk, req, skb);
1401 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001402 goto exit_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403
Herbert Xubcd76112006-06-30 13:36:35 -07001404 newsk->sk_gso_type = SKB_GSO_TCPV4;
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001405 inet_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406
1407 newtp = tcp_sk(newsk);
1408 newinet = inet_sk(newsk);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001409 ireq = inet_rsk(req);
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001410 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1411 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
David Ahern6dd9a142015-12-16 13:20:44 -08001412 newsk->sk_bound_dev_if = ireq->ir_iif;
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001413 newinet->inet_saddr = ireq->ir_loc_addr;
1414 inet_opt = rcu_dereference(ireq->ireq_opt);
1415 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001416 newinet->mc_index = inet_iif(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001417 newinet->mc_ttl = ip_hdr(skb)->ttl;
Jiri Benc4c507d22012-02-09 09:35:49 +00001418 newinet->rcv_tos = ip_hdr(skb)->tos;
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001419 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001420 if (inet_opt)
1421 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001422 newinet->inet_id = newtp->write_seq ^ jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423
Eric Dumazetdfd25ff2012-03-10 09:20:21 +00001424 if (!dst) {
1425 dst = inet_csk_route_child_sock(sk, newsk, req);
1426 if (!dst)
1427 goto put_and_exit;
1428 } else {
1429 /* syncookie case : see end of cookie_v4_check() */
1430 }
David S. Miller0e734412011-05-08 15:28:03 -07001431 sk_setup_caps(newsk, dst);
1432
Daniel Borkmann81164412015-01-05 23:57:48 +01001433 tcp_ca_openreq_child(newsk, dst);
1434
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001436 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Tom Quetchenbachf5fff5d2008-09-21 00:21:51 -07001437
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 tcp_initialize_rcv_mss(newsk);
1439
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001440#ifdef CONFIG_TCP_MD5SIG
1441 /* Copy over the MD5 key from the original socket */
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001442 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1443 AF_INET);
Ian Morris00db4122015-04-03 09:17:27 +01001444 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001445 /*
1446 * We're using one, so create a matching key
1447 * on the newsk structure. If we fail to get
1448 * memory, then we end up not copying the key
1449 * across. Shucks.
1450 */
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001451 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
Ivan Delalande67973182017-06-15 18:07:06 -07001452 AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
Eric Dumazeta4654192010-05-16 00:36:33 -07001453 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001454 }
1455#endif
1456
David S. Miller0e734412011-05-08 15:28:03 -07001457 if (__inet_inherit_port(sk, newsk) < 0)
1458 goto put_and_exit;
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001459 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001460 if (likely(*own_req)) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001461 tcp_move_syn(newtp, req);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001462 ireq->ireq_opt = NULL;
1463 } else {
1464 newinet->inet_opt = NULL;
1465 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466 return newsk;
1467
1468exit_overflow:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001469 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001470exit_nonewsk:
1471 dst_release(dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472exit:
Eric Dumazet9caad862016-04-01 08:52:20 -07001473 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 return NULL;
David S. Miller0e734412011-05-08 15:28:03 -07001475put_and_exit:
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001476 newinet->inet_opt = NULL;
Christoph Paasche337e242012-12-14 04:07:58 +00001477 inet_csk_prepare_forced_close(newsk);
1478 tcp_done(newsk);
David S. Miller0e734412011-05-08 15:28:03 -07001479 goto exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001481EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482
Eric Dumazet079096f2015-10-02 11:43:32 -07001483static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485#ifdef CONFIG_SYN_COOKIES
Eric Dumazet079096f2015-10-02 11:43:32 -07001486 const struct tcphdr *th = tcp_hdr(skb);
1487
Florian Westphalaf9b4732010-06-03 00:43:44 +00001488 if (!th->syn)
Cong Wang461b74c2014-10-15 14:33:22 -07001489 sk = cookie_v4_check(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490#endif
1491 return sk;
1492}
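
/* Context for tcp_v4_cookie_check() (a sketch of the flow): when the
 * listener shed state under SYN-flood pressure, the handshake-completing
 * ACK arrives with no matching request sock; cookie_v4_check() validates
 * the SYN cookie encoded in the sequence numbers and, if it checks out,
 * builds the child socket directly from that ACK.
 */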
1493
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494/* The socket must have its spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001495 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 *
1497 * We have a potential double-lock case here, so even when
1498 * doing backlog processing we use the BH locking scheme.
1499 * This is because we cannot sleep with the original spinlock
1500 * held.
1501 */
1502int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1503{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001504 struct sock *rsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001505
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet404e0a82012-07-29 23:20:37 +00001507 struct dst_entry *dst = sk->sk_rx_dst;
1508
Tom Herbertbdeab992011-08-14 19:45:55 +00001509 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001510 sk_mark_napi_id(sk, skb);
Eric Dumazet404e0a82012-07-29 23:20:37 +00001511 if (dst) {
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001512 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
Ian Morris51456b22015-04-03 09:17:26 +01001513 !dst->ops->check(dst, 0)) {
David S. Miller92101b32012-07-23 16:29:00 -07001514 dst_release(dst);
1515 sk->sk_rx_dst = NULL;
1516 }
1517 }
Yafang Shao3d97d882018-05-29 23:27:31 +08001518 tcp_rcv_established(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 return 0;
1520 }
1521
Eric Dumazet12e25e12015-06-03 23:49:21 -07001522 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 goto csum_err;
1524
1525 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001526 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1527
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 if (!nsk)
1529 goto discard;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 if (nsk != sk) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001531 if (tcp_child_process(sk, nsk, skb)) {
1532 rsk = nsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 goto reset;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001534 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 return 0;
1536 }
Eric Dumazetca551582010-06-03 09:03:58 +00001537 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001538 sock_rps_save_rxhash(sk, skb);
Eric Dumazetca551582010-06-03 09:03:58 +00001539
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001540 if (tcp_rcv_state_process(sk, skb)) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001541 rsk = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 goto reset;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001543 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 return 0;
1545
1546reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001547 tcp_v4_send_reset(rsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548discard:
1549 kfree_skb(skb);
1550 /* Be careful here. If this function gets more complicated and
1551 * gcc suffers from register pressure on the x86, sk (in %ebx)
1552 * might be destroyed here. This current version compiles correctly,
1553 * but you have been warned.
1554 */
1555 return 0;
1556
1557csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001558 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1559 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 goto discard;
1561}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001562EXPORT_SYMBOL(tcp_v4_do_rcv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563
Paolo Abeni74874492017-09-28 15:51:36 +02001564int tcp_v4_early_demux(struct sk_buff *skb)
David S. Miller41063e92012-06-19 21:22:05 -07001565{
David S. Miller41063e92012-06-19 21:22:05 -07001566 const struct iphdr *iph;
1567 const struct tcphdr *th;
1568 struct sock *sk;
David S. Miller41063e92012-06-19 21:22:05 -07001569
David S. Miller41063e92012-06-19 21:22:05 -07001570 if (skb->pkt_type != PACKET_HOST)
Paolo Abeni74874492017-09-28 15:51:36 +02001571 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001572
Eric Dumazet45f00f92012-10-22 21:42:47 +00001573 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
Paolo Abeni74874492017-09-28 15:51:36 +02001574 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001575
1576 iph = ip_hdr(skb);
Eric Dumazet45f00f92012-10-22 21:42:47 +00001577 th = tcp_hdr(skb);
David S. Miller41063e92012-06-19 21:22:05 -07001578
1579 if (th->doff < sizeof(struct tcphdr) / 4)
Paolo Abeni74874492017-09-28 15:51:36 +02001580 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001581
Eric Dumazet45f00f92012-10-22 21:42:47 +00001582 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
David S. Miller41063e92012-06-19 21:22:05 -07001583 iph->saddr, th->source,
Vijay Subramanian7011d082012-06-23 17:38:10 +00001584 iph->daddr, ntohs(th->dest),
David Ahern3fa6f612017-08-07 08:44:17 -07001585 skb->skb_iif, inet_sdif(skb));
David S. Miller41063e92012-06-19 21:22:05 -07001586 if (sk) {
1587 skb->sk = sk;
1588 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001589 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001590 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001591
David S. Miller41063e92012-06-19 21:22:05 -07001592 if (dst)
1593 dst = dst_check(dst, 0);
David S. Miller92101b32012-07-23 16:29:00 -07001594 if (dst &&
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001595 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
David S. Miller92101b32012-07-23 16:29:00 -07001596 skb_dst_set_noref(skb, dst);
David S. Miller41063e92012-06-19 21:22:05 -07001597 }
1598 }
Paolo Abeni74874492017-09-28 15:51:36 +02001599 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001600}
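
/* Intent of the early demux above (a sketch, not a spec): for packets
 * belonging to an established flow we attach the socket and, when the
 * incoming ifindex still matches, its cached rx dst to the skb before
 * the IP layer does its routing lookup, letting the later tcp_v4_rcv()
 * path reuse that work (see the sk_rx_dst checks in tcp_v4_do_rcv()).
 */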
1601
Eric Dumazetc9c33212016-08-27 07:37:54 -07001602bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1603{
1604 u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1605
1606 /* Only socket owner can try to collapse/prune rx queues
1607 * to reduce memory overhead, so add a little headroom here.
1608 * Only a few socket backlogs are likely to be non-empty concurrently.
1609 */
1610 limit += 64*1024;
1611
1612 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1613 * we can fix skb->truesize to its real value to avoid future drops.
1614 * This is valid because skb is not yet charged to the socket.
1615 * It has been noticed that pure SACK packets were sometimes dropped
1616 * (when cooked by drivers without the copybreak feature).
1617 */
Eric Dumazet60b1af32017-01-24 14:57:36 -08001618 skb_condense(skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001619
1620 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1621 bh_unlock_sock(sk);
1622 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1623 return true;
1624 }
1625 return false;
1626}
1627EXPORT_SYMBOL(tcp_add_backlog);
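
/* Worked example for the limit above: with sk_rcvbuf and sk_sndbuf at
 * 256 KB each, limit = 256K + 256K + 64K = 576 KB of skb truesize may
 * sit in the backlog before sk_add_backlog() fails and the segment is
 * dropped under LINUX_MIB_TCPBACKLOGDROP.
 */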
1628
Eric Dumazetac6e7802016-11-10 13:12:35 -08001629int tcp_filter(struct sock *sk, struct sk_buff *skb)
1630{
1631 struct tcphdr *th = (struct tcphdr *)skb->data;
1632 unsigned int eaten = skb->len;
1633 int err;
1634
1635 err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1636 if (!err) {
1637 eaten -= skb->len;
1638 TCP_SKB_CB(skb)->end_seq -= eaten;
1639 }
1640 return err;
1641}
1642EXPORT_SYMBOL(tcp_filter);
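
/* Bookkeeping example for the trim above: if a socket filter cuts a
 * 1500-byte skb down to just the 40 bytes of headers (the th->doff * 4
 * floor), eaten ends up as 1500 - 40 = 1460 and end_seq is pulled back
 * by 1460, keeping TCP_SKB_CB() consistent with the bytes we kept.
 */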
1643
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001644static void tcp_v4_restore_cb(struct sk_buff *skb)
1645{
1646 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1647 sizeof(struct inet_skb_parm));
1648}
1649
1650static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1651 const struct tcphdr *th)
1652{
1653 /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1654 * barrier() makes sure the compiler won't play fool^Waliasing games.
1655 */
1656 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1657 sizeof(struct inet_skb_parm));
1658 barrier();
1659
1660 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1661 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1662 skb->len - th->doff * 4);
1663 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1664 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1665 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1666 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1667 TCP_SKB_CB(skb)->sacked = 0;
1668 TCP_SKB_CB(skb)->has_rxtstamp =
1669 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1670}
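
/* end_seq arithmetic above, with example numbers: a bare SYN at
 * seq = 1000 occupies one unit of sequence space, so
 * end_seq = 1000 + 1 + 0 + 0 = 1001; a pure 100-byte data segment at
 * seq = 1000 gives end_seq = 1000 + 0 + 0 + 100 = 1100, with
 * skb->len - th->doff * 4 being the payload length.
 */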
1671
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672/*
1673 * From tcp_input.c
1674 */
1675
1676int tcp_v4_rcv(struct sk_buff *skb)
1677{
Eric Dumazet3b24d852016-04-01 08:52:17 -07001678 struct net *net = dev_net(skb->dev);
David Ahern3fa6f612017-08-07 08:44:17 -07001679 int sdif = inet_sdif(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001680 const struct iphdr *iph;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001681 const struct tcphdr *th;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001682 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 struct sock *sk;
1684 int ret;
1685
1686 if (skb->pkt_type != PACKET_HOST)
1687 goto discard_it;
1688
1689 /* Count it even if it's bad */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001690 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
1692 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1693 goto discard_it;
1694
Eric Dumazetea1627c2016-05-13 09:16:40 -07001695 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696
Eric Dumazetea1627c2016-05-13 09:16:40 -07001697 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 goto bad_packet;
1699 if (!pskb_may_pull(skb, th->doff * 4))
1700 goto discard_it;
1701
1702 /* An explanation is required here, I think.
1703 * Packet length and doff are validated by header prediction,
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001704 * provided the case of th->doff==0 is eliminated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 * So, we defer the checks. */
Tom Herberted70fcf2014-05-02 16:29:38 -07001706
1707 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001708 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
Eric Dumazetea1627c2016-05-13 09:16:40 -07001710 th = (const struct tcphdr *)skb->data;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001711 iph = ip_hdr(skb);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001712lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001713 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
David Ahern3fa6f612017-08-07 08:44:17 -07001714 th->dest, sdif, &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 if (!sk)
1716 goto no_tcp_socket;
1717
Eric Dumazetbb134d52010-03-09 05:55:56 +00001718process:
1719 if (sk->sk_state == TCP_TIME_WAIT)
1720 goto do_time_wait;
1721
Eric Dumazet079096f2015-10-02 11:43:32 -07001722 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1723 struct request_sock *req = inet_reqsk(sk);
Eric Dumazete0f97592018-02-13 06:14:12 -08001724 bool req_stolen = false;
Eric Dumazet77166822016-02-18 05:39:18 -08001725 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001726
1727 sk = req->rsk_listener;
Eric Dumazet72923552016-02-11 22:50:29 -08001728 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001729 sk_drops_add(sk, skb);
Eric Dumazet72923552016-02-11 22:50:29 -08001730 reqsk_put(req);
1731 goto discard_it;
1732 }
Frank van der Linden4fd44a92018-06-12 23:09:37 +00001733 if (tcp_checksum_complete(skb)) {
1734 reqsk_put(req);
1735 goto csum_error;
1736 }
Eric Dumazet77166822016-02-18 05:39:18 -08001737 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001738 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001739 goto lookup;
1740 }
Eric Dumazet3b24d852016-04-01 08:52:17 -07001741 /* We own a reference on the listener, increase it again
1742 * as we might lose it too soon.
1743 */
Eric Dumazet77166822016-02-18 05:39:18 -08001744 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001745 refcounted = true;
Eric Dumazet1f3b3592017-09-08 12:44:47 -07001746 nsk = NULL;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001747 if (!tcp_filter(sk, skb)) {
1748 th = (const struct tcphdr *)skb->data;
1749 iph = ip_hdr(skb);
1750 tcp_v4_fill_cb(skb, iph, th);
Eric Dumazete0f97592018-02-13 06:14:12 -08001751 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001752 }
Eric Dumazet079096f2015-10-02 11:43:32 -07001753 if (!nsk) {
1754 reqsk_put(req);
Eric Dumazete0f97592018-02-13 06:14:12 -08001755 if (req_stolen) {
1756 /* Another cpu got exclusive access to req
1757 * and created a full blown socket.
1758 * Try to feed this packet to this socket
1759 * instead of discarding it.
1760 */
1761 tcp_v4_restore_cb(skb);
1762 sock_put(sk);
1763 goto lookup;
1764 }
Eric Dumazet77166822016-02-18 05:39:18 -08001765 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001766 }
1767 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001768 reqsk_put(req);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001769 tcp_v4_restore_cb(skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001770 } else if (tcp_child_process(sk, nsk, skb)) {
1771 tcp_v4_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001772 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001773 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001774 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001775 return 0;
1776 }
1777 }
Eric Dumazet6cce09f2010-03-07 23:21:57 +00001778 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001779 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingerd218d112010-01-11 16:28:01 -08001780 goto discard_and_relse;
Eric Dumazet6cce09f2010-03-07 23:21:57 +00001781 }
Stephen Hemmingerd218d112010-01-11 16:28:01 -08001782
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1784 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001785
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001786 if (tcp_v4_inbound_md5_hash(sk, skb))
1787 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001788
Patrick McHardyb59c2702006-01-06 23:06:10 -08001789 nf_reset(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
Eric Dumazetac6e7802016-11-10 13:12:35 -08001791 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001793 th = (const struct tcphdr *)skb->data;
1794 iph = ip_hdr(skb);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001795 tcp_v4_fill_cb(skb, iph, th);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
1797 skb->dev = NULL;
1798
Eric Dumazete994b2f2015-10-02 11:43:39 -07001799 if (sk->sk_state == TCP_LISTEN) {
1800 ret = tcp_v4_do_rcv(sk, skb);
1801 goto put_and_return;
1802 }
1803
1804 sk_incoming_cpu_update(sk);
1805
Ingo Molnarc6366182006-07-03 00:25:13 -07001806 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001807 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 ret = 0;
1809 if (!sock_owned_by_user(sk)) {
Florian Westphale7942d02017-07-30 03:57:18 +02001810 ret = tcp_v4_do_rcv(sk, skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001811 } else if (tcp_add_backlog(sk, skb)) {
Zhu Yi6b03a532010-03-04 18:01:41 +00001812 goto discard_and_relse;
1813 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 bh_unlock_sock(sk);
1815
Eric Dumazete994b2f2015-10-02 11:43:39 -07001816put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001817 if (refcounted)
1818 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819
1820 return ret;
1821
1822no_tcp_socket:
1823 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1824 goto discard_it;
1825
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001826 tcp_v4_fill_cb(skb, iph, th);
1827
Eric Dumazet12e25e12015-06-03 23:49:21 -07001828 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001829csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001830 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001832 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001834 tcp_v4_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 }
1836
1837discard_it:
1838 /* Discard frame. */
1839 kfree_skb(skb);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001840 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841
1842discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001843 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001844 if (refcounted)
1845 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 goto discard_it;
1847
1848do_time_wait:
1849 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001850 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 goto discard_it;
1852 }
1853
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001854 tcp_v4_fill_cb(skb, iph, th);
1855
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001856 if (tcp_checksum_complete(skb)) {
1857 inet_twsk_put(inet_twsk(sk));
1858 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 }
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001860 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 case TCP_TW_SYN: {
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001862 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
Craig Galleka5836362016-02-10 11:50:38 -05001863 &tcp_hashinfo, skb,
1864 __tcp_hdrlen(th),
Tom Herbertda5e3632013-01-22 09:50:24 +00001865 iph->saddr, th->source,
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001866 iph->daddr, th->dest,
David Ahern3fa6f612017-08-07 08:44:17 -07001867 inet_iif(skb),
1868 sdif);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 if (sk2) {
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001870 inet_twsk_deschedule_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 sk = sk2;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001872 tcp_v4_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001873 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 goto process;
1875 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 }
Gustavo A. R. Silvafcfd6df2017-10-16 15:48:55 -05001877 /* to ACK */
1878 /* fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 case TCP_TW_ACK:
1880 tcp_v4_timewait_ack(sk, skb);
1881 break;
1882 case TCP_TW_RST:
Florian Westphal271c3b92015-12-21 21:29:26 +01001883 tcp_v4_send_reset(sk, skb);
1884 inet_twsk_deschedule_put(inet_twsk(sk));
1885 goto discard_it;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 case TCP_TW_SUCCESS:;
1887 }
1888 goto discard_it;
1889}
1890
David S. Millerccb7c412010-12-01 18:09:13 -08001891static struct timewait_sock_ops tcp_timewait_sock_ops = {
1892 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1893 .twsk_unique = tcp_twsk_unique,
1894 .twsk_destructor= tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001895};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896
Eric Dumazet63d02d12012-08-09 14:11:00 +00001897void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
Eric Dumazet5d299f32012-08-06 05:09:33 +00001898{
1899 struct dst_entry *dst = skb_dst(skb);
1900
Eric Dumazet5037e9e2015-12-14 14:08:53 -08001901 if (dst && dst_hold_safe(dst)) {
Eric Dumazetca777ef2014-09-08 08:06:07 -07001902 sk->sk_rx_dst = dst;
1903 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1904 }
Eric Dumazet5d299f32012-08-06 05:09:33 +00001905}
Eric Dumazet63d02d12012-08-09 14:11:00 +00001906EXPORT_SYMBOL(inet_sk_rx_dst_set);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001907
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001908const struct inet_connection_sock_af_ops ipv4_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001909 .queue_xmit = ip_queue_xmit,
1910 .send_check = tcp_v4_send_check,
1911 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001912 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001913 .conn_request = tcp_v4_conn_request,
1914 .syn_recv_sock = tcp_v4_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001915 .net_header_len = sizeof(struct iphdr),
1916 .setsockopt = ip_setsockopt,
1917 .getsockopt = ip_getsockopt,
1918 .addr2sockaddr = inet_csk_addr2sockaddr,
1919 .sockaddr_len = sizeof(struct sockaddr_in),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001920#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001921 .compat_setsockopt = compat_ip_setsockopt,
1922 .compat_getsockopt = compat_ip_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001923#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001924 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925};
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001926EXPORT_SYMBOL(ipv4_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001928#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001929static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001930 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001931 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001932 .md5_parse = tcp_v4_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001933};
Andrew Mortonb6332e62006-11-30 19:16:28 -08001934#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001935
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936/* NOTE: A lot of things are set to zero explicitly by the call to
1937 * sk_alloc(), so they need not be done here.
1938 */
1939static int tcp_v4_init_sock(struct sock *sk)
1940{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001941 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
Neal Cardwell900f65d2012-04-19 09:55:21 +00001943 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001945 icsk->icsk_af_ops = &ipv4_specific;
Neal Cardwell900f65d2012-04-19 09:55:21 +00001946
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001947#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001948 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001949#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 return 0;
1952}
1953
Brian Haley7d06b2e2008-06-14 17:04:49 -07001954void tcp_v4_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955{
1956 struct tcp_sock *tp = tcp_sk(sk);
1957
Song Liue1a4aa52017-10-23 09:20:26 -07001958 trace_tcp_destroy_sock(sk);
1959
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 tcp_clear_xmit_timers(sk);
1961
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001962 tcp_cleanup_congestion_control(sk);
Stephen Hemminger317a76f2005-06-23 12:19:55 -07001963
Dave Watson734942c2017-06-14 11:37:14 -07001964 tcp_cleanup_ulp(sk);
1965
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 /* Clean up the write buffer. */
David S. Millerfe067e82007-03-07 12:12:44 -08001967 tcp_write_queue_purge(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
Wei Wangcf1ef3f2017-04-20 14:45:46 -07001969 /* Check if we want to disable active TFO */
1970 tcp_fastopen_active_disable_ofo_check(sk);
1971
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 /* Cleans up our, hopefully empty, out_of_order_queue. */
Yaogong Wang9f5afea2016-09-07 14:49:28 -07001973 skb_rbtree_purge(&tp->out_of_order_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001975#ifdef CONFIG_TCP_MD5SIG
1976 /* Clean up the MD5 key list, if any */
1977 if (tp->md5sig_info) {
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001978 tcp_clear_md5_list(sk);
Mat Martineaufb7df5e2017-12-21 10:29:10 -08001979 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001980 tp->md5sig_info = NULL;
1981 }
1982#endif
1983
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 /* Clean up a referenced TCP bind bucket. */
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001985 if (inet_csk(sk)->icsk_bind_hash)
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001986 inet_put_port(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987
Ian Morris00db4122015-04-03 09:17:27 +01001988 BUG_ON(tp->fastopen_rsk);
William Allen Simpson435cf552009-12-02 18:17:05 +00001989
Yuchung Chengcf60af02012-07-19 06:43:09 +00001990 /* If socket is aborted during connect operation */
1991 tcp_free_fastopen_req(tp);
Yuchung Cheng1fba70e2017-10-18 11:22:51 -07001992 tcp_fastopen_destroy_cipher(sk);
Eric Dumazetcd8ae852015-05-03 21:34:46 -07001993 tcp_saved_syn_free(tp);
Yuchung Chengcf60af02012-07-19 06:43:09 +00001994
Glauber Costa180d8cd2011-12-11 21:47:02 +00001995 sk_sockets_allocated_dec(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997EXPORT_SYMBOL(tcp_v4_destroy_sock);
1998
1999#ifdef CONFIG_PROC_FS
2000/* Proc filesystem TCP sock list dumping. */
2001
Tom Herberta8b690f2010-06-07 00:43:42 -07002002/*
2003 * Get next listener socket follow cur. If cur is NULL, get first socket
2004 * starting from bucket given in st->bucket; when st->bucket is zero the
2005 * very first socket in the hash table is returned.
2006 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007static void *listening_get_next(struct seq_file *seq, void *cur)
2008{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002009 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Jianjun Kong5799de02008-11-03 02:49:10 -08002010 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002011 struct net *net = seq_file_net(seq);
Eric Dumazet3b24d852016-04-01 08:52:17 -07002012 struct inet_listen_hashbucket *ilb;
Eric Dumazet3b24d852016-04-01 08:52:17 -07002013 struct sock *sk = cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014
2015 if (!sk) {
Eric Dumazet3b24d852016-04-01 08:52:17 -07002016get_head:
Tom Herberta8b690f2010-06-07 00:43:42 -07002017 ilb = &tcp_hashinfo.listening_hash[st->bucket];
Eric Dumazet9652dc22016-10-19 21:24:58 -07002018 spin_lock(&ilb->lock);
Eric Dumazet3b24d852016-04-01 08:52:17 -07002019 sk = sk_head(&ilb->head);
Tom Herberta8b690f2010-06-07 00:43:42 -07002020 st->offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 goto get_sk;
2022 }
Eric Dumazet5caea4e2008-11-20 00:40:07 -08002023 ilb = &tcp_hashinfo.listening_hash[st->bucket];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024 ++st->num;
Tom Herberta8b690f2010-06-07 00:43:42 -07002025 ++st->offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
Eric Dumazet3b24d852016-04-01 08:52:17 -07002027 sk = sk_next(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028get_sk:
Eric Dumazet3b24d852016-04-01 08:52:17 -07002029 sk_for_each_from(sk) {
Pavel Emelyanov8475ef92010-11-22 03:26:12 +00002030 if (!net_eq(sock_net(sk), net))
2031 continue;
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002032 if (sk->sk_family == afinfo->family)
Eric Dumazet3b24d852016-04-01 08:52:17 -07002033 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 }
Eric Dumazet9652dc22016-10-19 21:24:58 -07002035 spin_unlock(&ilb->lock);
Tom Herberta8b690f2010-06-07 00:43:42 -07002036 st->offset = 0;
Eric Dumazet3b24d852016-04-01 08:52:17 -07002037 if (++st->bucket < INET_LHTABLE_SIZE)
2038 goto get_head;
2039 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040}
2041
2042static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2043{
Tom Herberta8b690f2010-06-07 00:43:42 -07002044 struct tcp_iter_state *st = seq->private;
2045 void *rc;
2046
2047 st->bucket = 0;
2048 st->offset = 0;
2049 rc = listening_get_next(seq, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050
2051 while (rc && *pos) {
2052 rc = listening_get_next(seq, rc);
2053 --*pos;
2054 }
2055 return rc;
2056}
2057
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002058static inline bool empty_bucket(const struct tcp_iter_state *st)
Andi Kleen6eac5602008-08-28 01:08:02 -07002059{
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002060 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
Andi Kleen6eac5602008-08-28 01:08:02 -07002061}
2062
Tom Herberta8b690f2010-06-07 00:43:42 -07002063/*
2064 * Get first established socket starting from bucket given in st->bucket.
2065 * If st->bucket is zero, the very first socket in the hash is returned.
2066 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067static void *established_get_first(struct seq_file *seq)
2068{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002069 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Jianjun Kong5799de02008-11-03 02:49:10 -08002070 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002071 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 void *rc = NULL;
2073
Tom Herberta8b690f2010-06-07 00:43:42 -07002074 st->offset = 0;
2075 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 struct sock *sk;
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002077 struct hlist_nulls_node *node;
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002078 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079
Andi Kleen6eac5602008-08-28 01:08:02 -07002080 /* Lockless fast path for the common case of empty buckets */
2081 if (empty_bucket(st))
2082 continue;
2083
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002084 spin_lock_bh(lock);
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002085 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002086 if (sk->sk_family != afinfo->family ||
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002087 !net_eq(sock_net(sk), net)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 continue;
2089 }
2090 rc = sk;
2091 goto out;
2092 }
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002093 spin_unlock_bh(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 }
2095out:
2096 return rc;
2097}
2098
2099static void *established_get_next(struct seq_file *seq, void *cur)
2100{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002101 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 struct sock *sk = cur;
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002103 struct hlist_nulls_node *node;
Jianjun Kong5799de02008-11-03 02:49:10 -08002104 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002105 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106
2107 ++st->num;
Tom Herberta8b690f2010-06-07 00:43:42 -07002108 ++st->offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002110 sk = sk_nulls_next(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002112 sk_nulls_for_each_from(sk, node) {
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002113 if (sk->sk_family == afinfo->family &&
2114 net_eq(sock_net(sk), net))
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002115 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 }
2117
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002118 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2119 ++st->bucket;
2120 return established_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121}
2122
2123static void *established_get_idx(struct seq_file *seq, loff_t pos)
2124{
Tom Herberta8b690f2010-06-07 00:43:42 -07002125 struct tcp_iter_state *st = seq->private;
2126 void *rc;
2127
2128 st->bucket = 0;
2129 rc = established_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
2131 while (rc && pos) {
2132 rc = established_get_next(seq, rc);
2133 --pos;
Arnaldo Carvalho de Melo71742592006-11-17 10:57:30 -02002134 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 return rc;
2136}
2137
2138static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2139{
2140 void *rc;
Jianjun Kong5799de02008-11-03 02:49:10 -08002141 struct tcp_iter_state *st = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 st->state = TCP_SEQ_STATE_LISTENING;
2144 rc = listening_get_idx(seq, &pos);
2145
2146 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 st->state = TCP_SEQ_STATE_ESTABLISHED;
2148 rc = established_get_idx(seq, pos);
2149 }
2150
2151 return rc;
2152}
2153
Tom Herberta8b690f2010-06-07 00:43:42 -07002154static void *tcp_seek_last_pos(struct seq_file *seq)
2155{
2156 struct tcp_iter_state *st = seq->private;
2157 int offset = st->offset;
2158 int orig_num = st->num;
2159 void *rc = NULL;
2160
2161 switch (st->state) {
Tom Herberta8b690f2010-06-07 00:43:42 -07002162 case TCP_SEQ_STATE_LISTENING:
2163 if (st->bucket >= INET_LHTABLE_SIZE)
2164 break;
2165 st->state = TCP_SEQ_STATE_LISTENING;
2166 rc = listening_get_next(seq, NULL);
2167 while (offset-- && rc)
2168 rc = listening_get_next(seq, rc);
2169 if (rc)
2170 break;
2171 st->bucket = 0;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002172 st->state = TCP_SEQ_STATE_ESTABLISHED;
Tom Herberta8b690f2010-06-07 00:43:42 -07002173 /* Fallthrough */
2174 case TCP_SEQ_STATE_ESTABLISHED:
Tom Herberta8b690f2010-06-07 00:43:42 -07002175 if (st->bucket > tcp_hashinfo.ehash_mask)
2176 break;
2177 rc = established_get_first(seq);
2178 while (offset-- && rc)
2179 rc = established_get_next(seq, rc);
2180 }
2181
2182 st->num = orig_num;
2183
2184 return rc;
2185}
2186
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002187void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188{
Jianjun Kong5799de02008-11-03 02:49:10 -08002189 struct tcp_iter_state *st = seq->private;
Tom Herberta8b690f2010-06-07 00:43:42 -07002190 void *rc;
2191
2192 if (*pos && *pos == st->last_pos) {
2193 rc = tcp_seek_last_pos(seq);
2194 if (rc)
2195 goto out;
2196 }
2197
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 st->state = TCP_SEQ_STATE_LISTENING;
2199 st->num = 0;
Tom Herberta8b690f2010-06-07 00:43:42 -07002200 st->bucket = 0;
2201 st->offset = 0;
2202 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2203
2204out:
2205 st->last_pos = *pos;
2206 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207}
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002208EXPORT_SYMBOL(tcp_seq_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_next);

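/* seq_file ->stop() callback: release whichever bucket lock the walk
 * is currently holding.  The listening hash uses a plain spinlock
 * while the established hash uses a BH-disabling one, so the unlock
 * has to match the state we stopped in.
 */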
void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
EXPORT_SYMBOL(tcp_seq_stop);

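/* The three helpers below each format one row of /proc/net/tcp; the
 * column meanings follow the header emitted by tcp4_seq_show().
 * get_openreq4() handles embryonic connections (request sockets in
 * TCP_NEW_SYN_RECV): the state is reported as TCP_SYN_RECV and the
 * columns that only make sense for a full socket are printed as 0.
 */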
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0,  /* open_requests have no inode */
		0,
		req);
}

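/* Full-socket row.  timer_active encodes which timer is pending
 * (1 retransmit/probe, 2 keepalive, 4 zero-window probe, 0 none).
 * rcv_nxt/copied_seq are read without the socket lock, so the
 * transient negative difference has to be clamped to 0.
 */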
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

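/* TIME_WAIT row: only the addresses, the substate and the remaining
 * timewait timer are meaningful; queue sizes, uid and inode are
 * reported as fixed zeros.
 */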
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

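/* ->show() dispatches on sk_state: TIME_WAIT and NEW_SYN_RECV entries
 * are not full sockets and get their dedicated formatters.  Every row
 * is padded to TMPSZ - 1 columns, presumably so that readers see
 * fixed-width records.
 */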
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

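/* Per-namespace /proc/net/tcp registration: proc_create_net_data()
 * binds the seq_operations above to a netns-private tcp_iter_state,
 * with tcp4_seq_afinfo attached as the file's private data.
 */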
static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

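/* The protocol descriptor that plugs TCP into the generic INET socket
 * layer; every socket call on an IPv4 TCP socket is dispatched through
 * one of these handlers.  SLAB_TYPESAFE_BY_RCU lets lockless lookups
 * safely touch sockets whose slab memory may be reused concurrently.
 */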
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	module_put(net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

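/* Per-namespace initialisation: create one raw control socket per
 * possible CPU (used to send resets and ACKs on behalf of connections
 * that have no full socket) and set the defaults for the namespace's
 * tcp_* sysctls.  Child namespaces inherit init_net's rmem/wmem limits
 * and, when the module reference can be taken, its congestion control.
 */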
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of four TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 262144;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

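/* Batched namespace teardown: purge all IPv4 TIME_WAIT sockets once
 * per exit batch, then drop each namespace's TCP fastopen context.
 */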
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

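/* Called once at boot from the INET init path; TCP cannot operate
 * without its per-cpu control sockets, hence the panic() on failure.
 */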
void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}