/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

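/* Cache the route the incoming skb arrived on, so later segments of this
 * connection can skip a routing lookup; the saved cookie lets us notice
 * when the cached dst has been invalidated.
 */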
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

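/* Derive the initial sequence number and the per-connection timestamp
 * offset from the segment's addresses (and ports, for the ISN) using the
 * keyed helpers from net/secure_seq.h.
 */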
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}

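/* Active open: validate the destination, handle flow labels and v4-mapped
 * destinations (which hand the socket over to the IPv4 code), route the
 * flow, pick a source address and finally send the SYN via tcp_connect().
 */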
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

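/* Path MTU shrank: refresh the cached route, lower the MSS accordingly and
 * retransmit what no longer fits.
 */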
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

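/* ICMPv6 error handler. Locate the socket the error refers to, filter out
 * stale or out-of-window reports, and then either adjust the path MTU,
 * follow a redirect, or surface the error to the application.
 */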
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted, it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

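/* Send a SYN-ACK for the given request: route it if no dst was supplied,
 * build the segment with tcp_make_synack() and transmit it with the
 * listener's IPv6 options and traffic class.
 */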
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

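/* TCP_MD5SIG setsockopt(): add or delete a peer key, using the IPv4 key
 * table for v4-mapped peers.
 */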
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

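/* Feed the IPv6 pseudo-header plus the TCP header (with its checksum field
 * zeroed) into the MD5 hash request.
 */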
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

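/* Verify the TCP-MD5 option on an incoming segment. Returns true when the
 * segment must be dropped: a key is configured but the option is missing,
 * an unexpected option is present, or the digest does not match.
 */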
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

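/* Fill in the IPv6 part of a freshly minted request sock: record the
 * addresses, pin link-local traffic to the incoming interface and keep the
 * SYN around when IPv6 packet options were requested.
 */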
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};

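/* Build and send a bare ACK or RST on the per-netns control socket,
 * mirroring the addresses of the packet we are answering and optionally
 * carrying timestamps and an MD5 signature.
 */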
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even when this is a RST; the
	 * underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

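/* Send a RST in response to a bad segment, echoing its sequence numbers
 * and, when TCP-MD5 is in use, signing the reset with the matching key.
 */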
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not loosen security here:
		 * the incoming packet is checked with the md5 hash of the
		 * found key; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

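/* ACK on behalf of a request sock (SYN_RECV or Fast Open), scaling the
 * advertised window as required by RFC 7323.
 */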
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}


static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

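/* Handle an incoming SYN on a listener, punting v4-mapped traffic to
 * tcp_v4_conn_request() and refusing non-unicast destinations.
 */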
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

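/* Create the child socket once the handshake completes. The v4-mapped case
 * is delegated to tcp_v4_syn_recv_sock() and then patched up to use the
 * mapped ops; the native case routes the flow, clones the listener's IPv6
 * state and copies the request's addresses.
 */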
Eric Dumazet0c271712015-09-29 07:42:48 -07001018static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001019 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001020 struct dst_entry *dst,
1021 struct request_sock *req_unhash,
1022 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023{
Eric Dumazet634fb9792013-10-09 15:21:29 -07001024 struct inet_request_sock *ireq;
Eric Dumazet0c271712015-09-29 07:42:48 -07001025 struct ipv6_pinfo *newnp;
1026 const struct ipv6_pinfo *np = inet6_sk(sk);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001027 struct ipv6_txoptions *opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028 struct tcp6_sock *newtcp6sk;
1029 struct inet_sock *newinet;
1030 struct tcp_sock *newtp;
1031 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001032#ifdef CONFIG_TCP_MD5SIG
1033 struct tcp_md5sig_key *key;
1034#endif
Neal Cardwell3840a062012-06-28 12:34:19 +00001035 struct flowi6 fl6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036
1037 if (skb->protocol == htons(ETH_P_IP)) {
1038 /*
1039 * v6 mapped
1040 */
1041
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001042 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1043 req_unhash, own_req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044
Ian Morris63159f22015-03-29 14:00:04 +01001045 if (!newsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046 return NULL;
1047
1048 newtcp6sk = (struct tcp6_sock *)newsk;
1049 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1050
1051 newinet = inet_sk(newsk);
1052 newnp = inet6_sk(newsk);
1053 newtp = tcp_sk(newsk);
1054
1055 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1056
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001057 newnp->saddr = newsk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001058
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001059 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001060 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001061#ifdef CONFIG_TCP_MD5SIG
1062 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1063#endif
1064
WANG Cong83eadda2017-05-09 16:59:54 -07001065 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001066 newnp->ipv6_ac_list = NULL;
1067 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068 newnp->pktoptions = NULL;
1069 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001070 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001071 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001072 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001073 if (np->repflow)
1074 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001076 /*
1077 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1078 * here, tcp_create_openreq_child now does this for us, see the comment in
1079 * that function for the gory details. -acme
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081
 1082	/* This is a tricky place. Until this moment the IPv4 TCP code
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001083	   worked with the IPv6 icsk.icsk_af_ops.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084 Sync it now.
1085 */
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001086 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087
1088 return newsk;
1089 }
1090
Eric Dumazet634fb9792013-10-09 15:21:29 -07001091 ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092
1093 if (sk_acceptq_is_full(sk))
1094 goto out_overflow;
1095
David S. Miller493f3772010-12-02 12:14:29 -08001096 if (!dst) {
Eric Dumazetf76b33c2015-09-29 07:42:42 -07001097 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
David S. Miller493f3772010-12-02 12:14:29 -08001098 if (!dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 goto out;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001100 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101
1102 newsk = tcp_create_openreq_child(sk, req, skb);
Ian Morris63159f22015-03-29 14:00:04 +01001103 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001104 goto out_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001106 /*
1107 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1108 * count here, tcp_create_openreq_child now does this for us, see the
1109 * comment in that function for the gory details. -acme
1110 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111
Stephen Hemminger59eed272006-08-25 15:55:43 -07001112 newsk->sk_gso_type = SKB_GSO_TCPV6;
Eric Dumazet6bd4f352015-12-02 21:53:57 -08001113 ip6_dst_store(newsk, dst, NULL, NULL);
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001114 inet6_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115
1116 newtcp6sk = (struct tcp6_sock *)newsk;
1117 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1118
1119 newtp = tcp_sk(newsk);
1120 newinet = inet_sk(newsk);
1121 newnp = inet6_sk(newsk);
1122
1123 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1124
Eric Dumazet634fb9792013-10-09 15:21:29 -07001125 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1126 newnp->saddr = ireq->ir_v6_loc_addr;
1127 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1128 newsk->sk_bound_dev_if = ireq->ir_iif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001130 /* Now IPv6 options...
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131
1132 First: no IPv4 options.
1133 */
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001134 newinet->inet_opt = NULL;
WANG Cong83eadda2017-05-09 16:59:54 -07001135 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001136 newnp->ipv6_ac_list = NULL;
Masayuki Nakagawad35690b2007-03-16 16:14:03 -07001137 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138
1139 /* Clone RX bits */
1140 newnp->rxopt.all = np->rxopt.all;
1141
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142 newnp->pktoptions = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001144 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001145 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001146 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001147 if (np->repflow)
1148 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149
1150 /* Clone native IPv6 options from listening socket (if any)
1151
 1152	   Yes, keeping a reference count would be much more clever,
 1153	   but we do one more thing here: reattach optmem
1154 to newsk.
1155 */
Huw Davies56ac42b2016-06-27 15:05:28 -04001156 opt = ireq->ipv6_opt;
1157 if (!opt)
1158 opt = rcu_dereference(np->opt);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001159 if (opt) {
1160 opt = ipv6_dup_options(newsk, opt);
1161 RCU_INIT_POINTER(newnp->opt, opt);
1162 }
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001163 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001164 if (opt)
1165 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1166 opt->opt_flen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167
Daniel Borkmann81164412015-01-05 23:57:48 +01001168 tcp_ca_openreq_child(newsk, dst);
1169
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001171 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Neal Cardwelld135c522012-04-22 09:45:47 +00001172
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 tcp_initialize_rcv_mss(newsk);
1174
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001175 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1176 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001178#ifdef CONFIG_TCP_MD5SIG
1179 /* Copy over the MD5 key from the original socket */
Wang Yufen4aa956d2014-03-29 09:27:29 +08001180 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
Ian Morris53b24b82015-03-29 14:00:05 +01001181 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001182 /* We're using one, so create a matching key
1183 * on the newsk structure. If we fail to get
1184 * memory, then we end up not copying the key
1185 * across. Shucks.
1186 */
Eric Dumazetefe42082013-10-03 15:42:29 -07001187 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
Mel Gorman99a1dec2012-07-31 16:44:14 -07001188 AF_INET6, key->key, key->keylen,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001189 sk_gfp_mask(sk, GFP_ATOMIC));
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001190 }
1191#endif
1192
Balazs Scheidler093d2822010-10-21 13:06:43 +02001193 if (__inet_inherit_port(sk, newsk) < 0) {
Christoph Paasche337e242012-12-14 04:07:58 +00001194 inet_csk_prepare_forced_close(newsk);
1195 tcp_done(newsk);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001196 goto out;
1197 }
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001198 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001199 if (*own_req) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001200 tcp_move_syn(newtp, req);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001201
1202 /* Clone pktoptions received with SYN, if we own the req */
1203 if (ireq->pktopts) {
1204 newnp->pktoptions = skb_clone(ireq->pktopts,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001205 sk_gfp_mask(sk, GFP_ATOMIC));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001206 consume_skb(ireq->pktopts);
1207 ireq->pktopts = NULL;
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001208 if (newnp->pktoptions) {
1209 tcp_v6_restore_cb(newnp->pktoptions);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001210 skb_set_owner_r(newnp->pktoptions, newsk);
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001211 }
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001212 }
Eric Dumazetce105002015-10-30 09:46:12 -07001213 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214
1215 return newsk;
1216
1217out_overflow:
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001218 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001219out_nonewsk:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 dst_release(dst);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001221out:
Eric Dumazet9caad862016-04-01 08:52:20 -07001222 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 return NULL;
1224}
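
The ETH_P_IP branch at the top of tcp_v6_syn_recv_sock() is what makes a single AF_INET6 listener usable for IPv4 peers: the child socket is built by tcp_v4_syn_recv_sock() and switched to the ipv6_mapped ops, so userspace sees the peer as an IPv4-mapped IPv6 address (::ffff:a.b.c.d). The following is a minimal userspace sketch, not part of this file, that exercises that path; the port number and output text are arbitrary and error handling is deliberately trimmed.

/* Dual-stack listener sketch: with IPV6_V6ONLY cleared, IPv4 clients reach
 * this AF_INET6 socket and accept() reports their addresses as
 * ::ffff:a.b.c.d, i.e. the "v6 mapped" case handled above.
 * Illustration only; error handling is minimal on purpose.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in6 addr;
	char buf[INET6_ADDRSTRLEN];
	int lfd = socket(AF_INET6, SOCK_STREAM, 0);
	int v6only = 0;			/* allow IPv4 via mapped addresses */

	setsockopt(lfd, IPPROTO_IPV6, IPV6_V6ONLY, &v6only, sizeof(v6only));

	memset(&addr, 0, sizeof(addr));
	addr.sin6_family = AF_INET6;
	addr.sin6_addr = in6addr_any;
	addr.sin6_port = htons(8080);	/* arbitrary example port */
	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
	listen(lfd, 16);

	for (;;) {
		struct sockaddr_in6 peer;
		socklen_t len = sizeof(peer);
		int cfd = accept(lfd, (struct sockaddr *)&peer, &len);

		if (cfd < 0)
			break;
		inet_ntop(AF_INET6, &peer.sin6_addr, buf, sizeof(buf));
		printf("peer %s\n", buf);	/* IPv4 peers show as ::ffff:... */
		close(cfd);
	}
	close(lfd);
	return 0;
}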
1225
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226/* The socket must have its spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001227 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228 *
1229 * We have a potential double-lock case here, so even when
1230 * doing backlog processing we use the BH locking scheme.
1231 * This is because we cannot sleep with the original spinlock
1232 * held.
1233 */
1234static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1235{
1236 struct ipv6_pinfo *np = inet6_sk(sk);
1237 struct tcp_sock *tp;
1238 struct sk_buff *opt_skb = NULL;
1239
 1240	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
 1241	   goes to the IPv4 receive handler and is backlogged.
 1242	   From the backlog it always ends up here. Kerboom...
 1243	   Fortunately, tcp_rcv_established and rcv_established
 1244	   handle it correctly, but that is not the case with
1245 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1246 */
1247
1248 if (skb->protocol == htons(ETH_P_IP))
1249 return tcp_v4_do_rcv(sk, skb);
1250
Eric Dumazetac6e7802016-11-10 13:12:35 -08001251 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 goto discard;
1253
1254 /*
1255 * socket locking is here for SMP purposes as backlog rcv
1256 * is currently called with bh processing disabled.
1257 */
1258
1259 /* Do Stevens' IPV6_PKTOPTIONS.
1260
 1261	   Yes, guys, this is the only place in our code where we
 1262	   can do this without affecting IPv4.
 1263	   The rest of the code is protocol independent,
 1264	   and I do not like the idea of uglifying IPv4.
 1265
 1266	   Actually, the whole idea behind IPV6_PKTOPTIONS
 1267	   does not look very well thought out. For now we latch the
 1268	   options received in the last packet enqueued
 1269	   by TCP. Feel free to propose a better solution.
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001270 --ANK (980728)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 */
1272 if (np->rxopt.all)
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001273 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274
1275 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet5d299f32012-08-06 05:09:33 +00001276 struct dst_entry *dst = sk->sk_rx_dst;
1277
Tom Herbertbdeab992011-08-14 19:45:55 +00001278 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001279 sk_mark_napi_id(sk, skb);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001280 if (dst) {
1281 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1282 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1283 dst_release(dst);
1284 sk->sk_rx_dst = NULL;
1285 }
1286 }
1287
Vijay Subramanianc995ae22013-09-03 12:23:22 -07001288 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 if (opt_skb)
1290 goto ipv6_pktoptions;
1291 return 0;
1292 }
1293
Eric Dumazet12e25e12015-06-03 23:49:21 -07001294 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 goto csum_err;
1296
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001297 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001298 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1299
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300 if (!nsk)
1301 goto discard;
1302
Weilong Chen4c99aa42013-12-19 18:44:34 +08001303 if (nsk != sk) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 if (tcp_child_process(sk, nsk, skb))
1305 goto reset;
1306 if (opt_skb)
1307 __kfree_skb(opt_skb);
1308 return 0;
1309 }
Neil Horman47482f12011-04-06 13:07:09 -07001310 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001311 sock_rps_save_rxhash(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001313 if (tcp_rcv_state_process(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 goto reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 if (opt_skb)
1316 goto ipv6_pktoptions;
1317 return 0;
1318
1319reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001320 tcp_v6_send_reset(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321discard:
1322 if (opt_skb)
1323 __kfree_skb(opt_skb);
1324 kfree_skb(skb);
1325 return 0;
1326csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001327 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1328 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 goto discard;
1330
1331
1332ipv6_pktoptions:
 1333	/* What is going on here?
 1334
 1335	   1. The skb was enqueued by TCP.
 1336	   2. The skb was added to the tail of the read queue, not out of order.
 1337	   3. The socket is not in a passive state.
 1338	   4. Finally, it really contains options which the user wants to receive.
1339 */
1340 tp = tcp_sk(sk);
1341 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1342 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001343 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
Eric Dumazet870c3152014-10-17 09:17:20 -07001344 np->mcast_oif = tcp_v6_iif(opt_skb);
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001345 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001346 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
Florent Fourcot82e9f102013-12-08 15:46:59 +01001347 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
Florent Fourcot1397ed32013-12-08 15:46:57 +01001348 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001349 if (np->repflow)
1350 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
Eric Dumazeta2247722014-09-27 09:50:56 -07001351 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 skb_set_owner_r(opt_skb, sk);
Eric Dumazet8ce48622016-10-12 19:01:45 +02001353 tcp_v6_restore_cb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 opt_skb = xchg(&np->pktoptions, opt_skb);
1355 } else {
1356 __kfree_skb(opt_skb);
1357 opt_skb = xchg(&np->pktoptions, NULL);
1358 }
1359 }
1360
Wei Yongjun800d55f2009-02-23 21:45:33 +00001361 kfree_skb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 return 0;
1363}
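
The pktoptions skb latched at the ipv6_pktoptions label above is what the Stevens-style IPV6_PKTOPTIONS interface hands back to userspace. Below is a hedged sketch of one way to read it on a connected AF_INET6 TCP socket; it assumes the legacy option is exposed to userspace as IPV6_2292PKTOPTIONS on current kernels and that the returned buffer is a sequence of cmsg records, so treat it as an illustration rather than a reference.

/* Sketch: read the latched per-packet options on a connected AF_INET6 TCP
 * socket. Assumptions: the legacy Stevens option is available as
 * IPV6_2292PKTOPTIONS and fills the buffer with cmsg records; the
 * IPV6_RECV* options make the stack start latching in the first place.
 * Illustration only.
 */
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef IPV6_2292PKTOPTIONS
#define IPV6_2292PKTOPTIONS 6	/* value from linux/in6.h; assumption */
#endif

static void dump_latched_options(int tcp_fd)
{
	char buf[512];
	socklen_t len = sizeof(buf);
	struct msghdr msg;
	struct cmsghdr *cmsg;
	int on = 1;

	/* Ask the stack to record per-packet info for this socket. */
	setsockopt(tcp_fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
	setsockopt(tcp_fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));

	if (getsockopt(tcp_fd, IPPROTO_IPV6, IPV6_2292PKTOPTIONS, buf, &len) < 0)
		return;

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = buf;
	msg.msg_controllen = len;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		printf("cmsg level=%d type=%d len=%zu\n",
		       cmsg->cmsg_level, cmsg->cmsg_type,
		       (size_t)cmsg->cmsg_len);
}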
1364
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001365static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1366 const struct tcphdr *th)
1367{
 1368	/* This is tricky: we move IP6CB to its correct location inside
1369 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1370 * _decode_session6() uses IP6CB().
1371 * barrier() makes sure compiler won't play aliasing games.
1372 */
1373 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1374 sizeof(struct inet6_skb_parm));
1375 barrier();
1376
1377 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1378 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1379 skb->len - th->doff*4);
1380 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1381 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1382 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1383 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1384 TCP_SKB_CB(skb)->sacked = 0;
1385}
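
tcp_v6_fill_cb() and tcp_v6_restore_cb() shuffle the same skb->cb[] scratch area between its IP6CB and TCP_SKB_CB views, which is why an overlap-safe memmove() plus a compiler barrier is used rather than memcpy(). The tiny userspace demo below only illustrates the overlap property; the struct, offsets and sizes are invented for the demo and are not the kernel's.

/* Demo of why an overlap-safe copy is needed when two views share one
 * scratch buffer. The layout below is made up; only the fact that source
 * and destination ranges overlap matters.
 */
#include <stdio.h>
#include <string.h>

struct demo_cb {
	char scratch[48];	/* stand-in for skb->cb[] */
};

int main(void)
{
	struct demo_cb cb;

	memset(&cb, 0, sizeof(cb));
	strcpy(cb.scratch, "per-packet control block");

	/* Relocate 25 bytes from offset 0 to offset 8: the ranges overlap,
	 * so memcpy() would be undefined behaviour here while memmove()
	 * is guaranteed to produce an intact copy.
	 */
	memmove(cb.scratch + 8, cb.scratch, 25);
	printf("%s\n", cb.scratch + 8);
	return 0;
}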
1386
Herbert Xue5bbef22007-10-15 12:50:28 -07001387static int tcp_v6_rcv(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001389 const struct tcphdr *th;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001390 const struct ipv6hdr *hdr;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001391 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 struct sock *sk;
1393 int ret;
Pavel Emelyanova86b1e32008-07-16 20:20:58 -07001394 struct net *net = dev_net(skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395
1396 if (skb->pkt_type != PACKET_HOST)
1397 goto discard_it;
1398
1399 /*
1400 * Count it even if it's bad.
1401 */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001402 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403
1404 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1405 goto discard_it;
1406
Eric Dumazetea1627c2016-05-13 09:16:40 -07001407 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408
Eric Dumazetea1627c2016-05-13 09:16:40 -07001409 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 goto bad_packet;
1411 if (!pskb_may_pull(skb, th->doff*4))
1412 goto discard_it;
1413
Tom Herberte4f45b72014-05-02 16:29:51 -07001414 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001415 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416
Eric Dumazetea1627c2016-05-13 09:16:40 -07001417 th = (const struct tcphdr *)skb->data;
Stephen Hemmingere802af92010-04-22 15:24:53 -07001418 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001420lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001421 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
Eric Dumazet3b24d852016-04-01 08:52:17 -07001422 th->source, th->dest, inet6_iif(skb),
1423 &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 if (!sk)
1425 goto no_tcp_socket;
1426
1427process:
1428 if (sk->sk_state == TCP_TIME_WAIT)
1429 goto do_time_wait;
1430
Eric Dumazet079096f2015-10-02 11:43:32 -07001431 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1432 struct request_sock *req = inet_reqsk(sk);
Eric Dumazet77166822016-02-18 05:39:18 -08001433 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001434
1435 sk = req->rsk_listener;
1436 tcp_v6_fill_cb(skb, hdr, th);
1437 if (tcp_v6_inbound_md5_hash(sk, skb)) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001438 sk_drops_add(sk, skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001439 reqsk_put(req);
1440 goto discard_it;
1441 }
Eric Dumazet77166822016-02-18 05:39:18 -08001442 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001443 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001444 goto lookup;
1445 }
Eric Dumazet77166822016-02-18 05:39:18 -08001446 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001447 refcounted = true;
Eric Dumazet77166822016-02-18 05:39:18 -08001448 nsk = tcp_check_req(sk, skb, req, false);
Eric Dumazet079096f2015-10-02 11:43:32 -07001449 if (!nsk) {
1450 reqsk_put(req);
Eric Dumazet77166822016-02-18 05:39:18 -08001451 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001452 }
1453 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001454 reqsk_put(req);
1455 tcp_v6_restore_cb(skb);
1456 } else if (tcp_child_process(sk, nsk, skb)) {
1457 tcp_v6_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001458 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001459 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001460 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001461 return 0;
1462 }
1463 }
Stephen Hemmingere802af92010-04-22 15:24:53 -07001464 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001465 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingere802af92010-04-22 15:24:53 -07001466 goto discard_and_relse;
1467 }
1468
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1470 goto discard_and_relse;
1471
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001472 tcp_v6_fill_cb(skb, hdr, th);
1473
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001474 if (tcp_v6_inbound_md5_hash(sk, skb))
1475 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001476
Eric Dumazetac6e7802016-11-10 13:12:35 -08001477 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001479 th = (const struct tcphdr *)skb->data;
1480 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481
1482 skb->dev = NULL;
1483
Eric Dumazete994b2f2015-10-02 11:43:39 -07001484 if (sk->sk_state == TCP_LISTEN) {
1485 ret = tcp_v6_do_rcv(sk, skb);
1486 goto put_and_return;
1487 }
1488
1489 sk_incoming_cpu_update(sk);
1490
Fabio Olive Leite293b9c42006-09-25 22:28:47 -07001491 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001492 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 ret = 0;
1494 if (!sock_owned_by_user(sk)) {
Dan Williams7bced392013-12-30 12:37:29 -08001495 if (!tcp_prequeue(sk, skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001496 ret = tcp_v6_do_rcv(sk, skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001497 } else if (tcp_add_backlog(sk, skb)) {
Zhu Yi6b03a532010-03-04 18:01:41 +00001498 goto discard_and_relse;
1499 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 bh_unlock_sock(sk);
1501
Eric Dumazete994b2f2015-10-02 11:43:39 -07001502put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001503 if (refcounted)
1504 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 return ret ? -1 : 0;
1506
1507no_tcp_socket:
1508 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1509 goto discard_it;
1510
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001511 tcp_v6_fill_cb(skb, hdr, th);
1512
Eric Dumazet12e25e12015-06-03 23:49:21 -07001513 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001514csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001515 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001517 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001519 tcp_v6_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 }
1521
1522discard_it:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 kfree_skb(skb);
1524 return 0;
1525
1526discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001527 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001528 if (refcounted)
1529 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 goto discard_it;
1531
1532do_time_wait:
1533 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001534 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 goto discard_it;
1536 }
1537
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001538 tcp_v6_fill_cb(skb, hdr, th);
1539
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001540 if (tcp_checksum_complete(skb)) {
1541 inet_twsk_put(inet_twsk(sk));
1542 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 }
1544
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001545 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 case TCP_TW_SYN:
1547 {
1548 struct sock *sk2;
1549
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001550 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
Craig Galleka5836362016-02-10 11:50:38 -05001551 skb, __tcp_hdrlen(th),
Tom Herbert5ba24952013-01-22 09:50:39 +00001552 &ipv6_hdr(skb)->saddr, th->source,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001553 &ipv6_hdr(skb)->daddr,
Eric Dumazet870c3152014-10-17 09:17:20 -07001554 ntohs(th->dest), tcp_v6_iif(skb));
Ian Morris53b24b82015-03-29 14:00:05 +01001555 if (sk2) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07001556 struct inet_timewait_sock *tw = inet_twsk(sk);
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001557 inet_twsk_deschedule_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 sk = sk2;
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001559 tcp_v6_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001560 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 goto process;
1562 }
1563 /* Fall through to ACK */
1564 }
1565 case TCP_TW_ACK:
1566 tcp_v6_timewait_ack(sk, skb);
1567 break;
1568 case TCP_TW_RST:
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001569 tcp_v6_restore_cb(skb);
Florian Westphal271c3b92015-12-21 21:29:26 +01001570 tcp_v6_send_reset(sk, skb);
1571 inet_twsk_deschedule_put(inet_twsk(sk));
1572 goto discard_it;
Wang Yufen4aa956d2014-03-29 09:27:29 +08001573 case TCP_TW_SUCCESS:
1574 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 }
1576 goto discard_it;
1577}
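
tcp_v6_rcv() starts by making sure the linear part of the skb actually holds the TCP header and that th->doff is sane before anything else is touched. Below is a userspace analogue of those checks on a flat buffer, for illustration only; the function name is invented, and the kernel of course operates on skbs, not flat buffers.

/* Userspace analogue of the early header checks in tcp_v6_rcv(): the
 * segment must hold at least the fixed TCP header, and doff (header
 * length in 32-bit words) must neither undershoot the fixed header nor
 * run past the end of the segment.
 */
#include <linux/tcp.h>
#include <stdbool.h>
#include <stddef.h>

static bool tcp_header_sane(const unsigned char *seg, size_t len)
{
	const struct tcphdr *th;

	if (len < sizeof(struct tcphdr))	/* pskb_may_pull(sizeof(*th)) */
		return false;
	th = (const struct tcphdr *)seg;
	if (th->doff < sizeof(struct tcphdr) / 4)	/* "bad_packet" path */
		return false;
	if ((size_t)th->doff * 4 > len)		/* pskb_may_pull(doff * 4) */
		return false;
	return true;
}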
1578
Eric Dumazetc7109982012-07-26 12:18:11 +00001579static void tcp_v6_early_demux(struct sk_buff *skb)
1580{
1581 const struct ipv6hdr *hdr;
1582 const struct tcphdr *th;
1583 struct sock *sk;
1584
1585 if (skb->pkt_type != PACKET_HOST)
1586 return;
1587
1588 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1589 return;
1590
1591 hdr = ipv6_hdr(skb);
1592 th = tcp_hdr(skb);
1593
1594 if (th->doff < sizeof(struct tcphdr) / 4)
1595 return;
1596
Eric Dumazet870c3152014-10-17 09:17:20 -07001597 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
Eric Dumazetc7109982012-07-26 12:18:11 +00001598 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1599 &hdr->saddr, th->source,
1600 &hdr->daddr, ntohs(th->dest),
1601 inet6_iif(skb));
1602 if (sk) {
1603 skb->sk = sk;
1604 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001605 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001606 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Neal Cardwellf3f12132012-10-22 21:41:48 +00001607
Eric Dumazetc7109982012-07-26 12:18:11 +00001608 if (dst)
Eric Dumazet5d299f32012-08-06 05:09:33 +00001609 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
Eric Dumazetc7109982012-07-26 12:18:11 +00001610 if (dst &&
Neal Cardwellf3f12132012-10-22 21:41:48 +00001611 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
Eric Dumazetc7109982012-07-26 12:18:11 +00001612 skb_dst_set_noref(skb, dst);
1613 }
1614 }
1615}
1616
David S. Millerccb7c412010-12-01 18:09:13 -08001617static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1618 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1619 .twsk_unique = tcp_twsk_unique,
Wang Yufen4aa956d2014-03-29 09:27:29 +08001620 .twsk_destructor = tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001621};
1622
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001623static const struct inet_connection_sock_af_ops ipv6_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001624 .queue_xmit = inet6_csk_xmit,
1625 .send_check = tcp_v6_send_check,
1626 .rebuild_header = inet6_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001627 .sk_rx_dst_set = inet6_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001628 .conn_request = tcp_v6_conn_request,
1629 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001630 .net_header_len = sizeof(struct ipv6hdr),
Eric Dumazet67469602012-04-24 07:37:38 +00001631 .net_frag_header_len = sizeof(struct frag_hdr),
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001632 .setsockopt = ipv6_setsockopt,
1633 .getsockopt = ipv6_getsockopt,
1634 .addr2sockaddr = inet6_csk_addr2sockaddr,
1635 .sockaddr_len = sizeof(struct sockaddr_in6),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001636#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001637 .compat_setsockopt = compat_ipv6_setsockopt,
1638 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001639#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001640 .mtu_reduced = tcp_v6_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641};
1642
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001643#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001644static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001645 .md5_lookup = tcp_v6_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001646 .calc_md5_hash = tcp_v6_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001647 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001648};
David S. Millera9286302006-11-14 19:53:22 -08001649#endif
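
For completeness, the MD5 machinery wired up through these af_ops is driven from userspace with the TCP_MD5SIG socket option. A hedged sketch follows; struct tcp_md5sig and TCP_MD5SIG_MAXKEYLEN come from linux/tcp.h, the helper name is invented, and the peer address and secret are placeholders supplied by the caller.

/* Sketch: install an RFC 2385 TCP-MD5 key for an IPv6 peer from userspace.
 * The kernel-side lookup/copy for such keys is what the CONFIG_TCP_MD5SIG
 * sections in this file implement. Placeholder arguments; minimal checks.
 */
#include <arpa/inet.h>
#include <linux/tcp.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int add_md5_key(int fd, const char *peer_ip6, const char *secret)
{
	struct tcp_md5sig md5;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;
	size_t keylen = strlen(secret);

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;

	memset(&md5, 0, sizeof(md5));
	sin6->sin6_family = AF_INET6;
	if (inet_pton(AF_INET6, peer_ip6, &sin6->sin6_addr) != 1)
		return -1;
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, secret, keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}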
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001650
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651/*
1652 * TCP over IPv4 via INET6 API
1653 */
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001654static const struct inet_connection_sock_af_ops ipv6_mapped = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001655 .queue_xmit = ip_queue_xmit,
1656 .send_check = tcp_v4_send_check,
1657 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet63d02d12012-08-09 14:11:00 +00001658 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001659 .conn_request = tcp_v6_conn_request,
1660 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001661 .net_header_len = sizeof(struct iphdr),
1662 .setsockopt = ipv6_setsockopt,
1663 .getsockopt = ipv6_getsockopt,
1664 .addr2sockaddr = inet6_csk_addr2sockaddr,
1665 .sockaddr_len = sizeof(struct sockaddr_in6),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001666#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001667 .compat_setsockopt = compat_ipv6_setsockopt,
1668 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001669#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001670 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671};
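
ipv6_mapped (and, under CONFIG_TCP_MD5SIG, tcp_sock_ipv6_mapped_specific below) is only ever attached to sockets that carry IPv4 traffic over the INET6 API. An application that wants to rule that out entirely can opt out before bind(); a minimal sketch, with the helper name invented here:

/* Sketch: a pure-IPv6 socket that never takes the ipv6_mapped path.
 * With IPV6_V6ONLY set, IPv4 clients are refused instead of being served
 * through mapped addresses. Error handling is intentionally minimal.
 */
#include <netinet/in.h>
#include <sys/socket.h>

static int make_v6only_socket(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	int on = 1;

	if (fd >= 0)
		setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
	return fd;
}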
1672
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001673#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001674static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001675 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001676 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001677 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001678};
David S. Millera9286302006-11-14 19:53:22 -08001679#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001680
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681/* NOTE: A lot of things are set to zero explicitly by the call to
 1682	 * sk_alloc(), so they need not be done here.
1683 */
1684static int tcp_v6_init_sock(struct sock *sk)
1685{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001686 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687
Neal Cardwell900f65d2012-04-19 09:55:21 +00001688 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001690 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001692#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001693 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001694#endif
1695
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 return 0;
1697}
1698
Brian Haley7d06b2e2008-06-14 17:04:49 -07001699static void tcp_v6_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 tcp_v4_destroy_sock(sk);
Brian Haley7d06b2e2008-06-14 17:04:49 -07001702 inet6_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703}
1704
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001705#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706/* Proc filesystem TCPv6 sock list dumping. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001707static void get_openreq6(struct seq_file *seq,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001708 const struct request_sock *req, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709{
Eric Dumazetfa76ce732015-03-19 19:04:20 -07001710 long ttd = req->rsk_timer.expires - jiffies;
Eric Dumazet634fb9792013-10-09 15:21:29 -07001711 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1712 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713
1714 if (ttd < 0)
1715 ttd = 0;
1716
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 seq_printf(seq,
1718 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001719 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 i,
1721 src->s6_addr32[0], src->s6_addr32[1],
1722 src->s6_addr32[2], src->s6_addr32[3],
Eric Dumazetb44084c2013-10-10 00:04:37 -07001723 inet_rsk(req)->ir_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 dest->s6_addr32[0], dest->s6_addr32[1],
1725 dest->s6_addr32[2], dest->s6_addr32[3],
Eric Dumazet634fb9792013-10-09 15:21:29 -07001726 ntohs(inet_rsk(req)->ir_rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 TCP_SYN_RECV,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001728 0, 0, /* could print option size, but that is af dependent. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001729 1, /* timers active (only the expire timer) */
1730 jiffies_to_clock_t(ttd),
Eric Dumazete6c022a2012-10-27 23:16:46 +00001731 req->num_timeout,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001732 from_kuid_munged(seq_user_ns(seq),
1733 sock_i_uid(req->rsk_listener)),
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001734 0, /* non standard timer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 0, /* open_requests have no inode */
1736 0, req);
1737}
1738
1739static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1740{
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001741 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 __u16 destp, srcp;
1743 int timer_active;
1744 unsigned long timer_expires;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001745 const struct inet_sock *inet = inet_sk(sp);
1746 const struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001747 const struct inet_connection_sock *icsk = inet_csk(sp);
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001748 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001749 int rx_queue;
1750 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751
Eric Dumazetefe42082013-10-03 15:42:29 -07001752 dest = &sp->sk_v6_daddr;
1753 src = &sp->sk_v6_rcv_saddr;
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001754 destp = ntohs(inet->inet_dport);
1755 srcp = ntohs(inet->inet_sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001756
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001757 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
Yuchung Cheng57dde7f2017-01-12 22:11:33 -08001758 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001759 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001761 timer_expires = icsk->icsk_timeout;
1762 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001764 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 } else if (timer_pending(&sp->sk_timer)) {
1766 timer_active = 2;
1767 timer_expires = sp->sk_timer.expires;
1768 } else {
1769 timer_active = 0;
1770 timer_expires = jiffies;
1771 }
1772
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001773 state = sk_state_load(sp);
1774 if (state == TCP_LISTEN)
1775 rx_queue = sp->sk_ack_backlog;
1776 else
1777 /* Because we don't lock the socket,
1778 * we might find a transient negative value.
1779 */
1780 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1781
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 seq_printf(seq,
1783 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001784 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 i,
1786 src->s6_addr32[0], src->s6_addr32[1],
1787 src->s6_addr32[2], src->s6_addr32[3], srcp,
1788 dest->s6_addr32[0], dest->s6_addr32[1],
1789 dest->s6_addr32[2], dest->s6_addr32[3], destp,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001790 state,
1791 tp->write_seq - tp->snd_una,
1792 rx_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 timer_active,
Eric Dumazeta399a802012-08-08 21:13:53 +00001794 jiffies_delta_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001795 icsk->icsk_retransmits,
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06001796 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001797 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 sock_i_ino(sp),
1799 atomic_read(&sp->sk_refcnt), sp,
Stephen Hemminger7be87352008-06-27 20:00:19 -07001800 jiffies_to_clock_t(icsk->icsk_rto),
1801 jiffies_to_clock_t(icsk->icsk_ack.ato),
Weilong Chen4c99aa42013-12-19 18:44:34 +08001802 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07001803 tp->snd_cwnd,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001804 state == TCP_LISTEN ?
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001805 fastopenq->max_qlen :
Yuchung Cheng0a672f72014-05-11 20:22:12 -07001806 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 );
1808}
1809
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001810static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001811 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812{
Eric Dumazet789f5582015-04-12 18:51:09 -07001813 long delta = tw->tw_timer.expires - jiffies;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001814 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 __u16 destp, srcp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816
Eric Dumazetefe42082013-10-03 15:42:29 -07001817 dest = &tw->tw_v6_daddr;
1818 src = &tw->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 destp = ntohs(tw->tw_dport);
1820 srcp = ntohs(tw->tw_sport);
1821
1822 seq_printf(seq,
1823 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Dan Rosenberg71338aa2011-05-23 12:17:35 +00001824 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 i,
1826 src->s6_addr32[0], src->s6_addr32[1],
1827 src->s6_addr32[2], src->s6_addr32[3], srcp,
1828 dest->s6_addr32[0], dest->s6_addr32[1],
1829 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1830 tw->tw_substate, 0, 0,
Eric Dumazeta399a802012-08-08 21:13:53 +00001831 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 atomic_read(&tw->tw_refcnt), tw);
1833}
1834
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835static int tcp6_seq_show(struct seq_file *seq, void *v)
1836{
1837 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07001838 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839
1840 if (v == SEQ_START_TOKEN) {
1841 seq_puts(seq,
1842 " sl "
1843 "local_address "
1844 "remote_address "
1845 "st tx_queue rx_queue tr tm->when retrnsmt"
1846 " uid timeout inode\n");
1847 goto out;
1848 }
1849 st = seq->private;
1850
Eric Dumazet079096f2015-10-02 11:43:32 -07001851 if (sk->sk_state == TCP_TIME_WAIT)
1852 get_timewait6_sock(seq, v, st->num);
1853 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001854 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07001855 else
1856 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857out:
1858 return 0;
1859}
1860
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001861static const struct file_operations tcp6_afinfo_seq_fops = {
1862 .owner = THIS_MODULE,
1863 .open = tcp_seq_open,
1864 .read = seq_read,
1865 .llseek = seq_lseek,
1866 .release = seq_release_net
1867};
1868
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 .name = "tcp6",
1871 .family = AF_INET6,
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001872 .seq_fops = &tcp6_afinfo_seq_fops,
Denis V. Lunev9427c4b2008-04-13 22:12:13 -07001873 .seq_ops = {
1874 .show = tcp6_seq_show,
1875 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876};
1877
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001878int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001880 return tcp_proc_register(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881}
1882
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001883void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001885 tcp_proc_unregister(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886}
1887#endif
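
get_tcp6_sock() above prints each IPv6 address as four 8-hex-digit words (the raw 32-bit chunks of the address, formatted as native integers) followed by the port in hex, which is why /proc/net/tcp6 lines look nothing like inet_ntop() output. Below is a small decoding sketch; it assumes the file is read on the same machine that produced it (the per-word byte order is host-specific), and the helper name is invented for illustration.

/* Decode one "address:port" field as printed by get_tcp6_sock(). Each of
 * the four 8-digit hex groups is one 32-bit word of the address printed as
 * a native integer; storing the parsed value back into a 32-bit slot
 * recovers the original bytes on the host that wrote it. The port is
 * printed in host order. Illustration only; minimal error handling.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static int decode_tcp6_field(const char *field, char *out, size_t outlen,
			     unsigned int *port)
{
	struct in6_addr addr;
	unsigned int w[4];
	int i;

	if (sscanf(field, "%8x%8x%8x%8x:%x",
		   &w[0], &w[1], &w[2], &w[3], port) != 5)
		return -1;
	for (i = 0; i < 4; i++)
		memcpy(&addr.s6_addr[4 * i], &w[i], 4);
	return inet_ntop(AF_INET6, &addr, out, outlen) ? 0 : -1;
}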
1888
1889struct proto tcpv6_prot = {
1890 .name = "TCPv6",
1891 .owner = THIS_MODULE,
1892 .close = tcp_close,
1893 .connect = tcp_v6_connect,
1894 .disconnect = tcp_disconnect,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001895 .accept = inet_csk_accept,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 .ioctl = tcp_ioctl,
1897 .init = tcp_v6_init_sock,
1898 .destroy = tcp_v6_destroy_sock,
1899 .shutdown = tcp_shutdown,
1900 .setsockopt = tcp_setsockopt,
1901 .getsockopt = tcp_getsockopt,
Ursula Braun4b9d07a2017-01-09 16:55:12 +01001902 .keepalive = tcp_set_keepalive,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 .recvmsg = tcp_recvmsg,
Changli Gao7ba42912010-07-10 20:41:55 +00001904 .sendmsg = tcp_sendmsg,
1905 .sendpage = tcp_sendpage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 .backlog_rcv = tcp_v6_do_rcv,
Eric Dumazet46d3cea2012-07-11 05:50:31 +00001907 .release_cb = tcp_release_cb,
Craig Gallek496611d2016-02-10 11:50:36 -05001908 .hash = inet6_hash,
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001909 .unhash = inet_unhash,
1910 .get_port = inet_csk_get_port,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 .enter_memory_pressure = tcp_enter_memory_pressure,
Eric Dumazetc9bee3b72013-07-22 20:27:07 -07001912 .stream_memory_free = tcp_stream_memory_free,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 .sockets_allocated = &tcp_sockets_allocated,
1914 .memory_allocated = &tcp_memory_allocated,
1915 .memory_pressure = &tcp_memory_pressure,
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001916 .orphan_count = &tcp_orphan_count,
Eric W. Biedermana4fe34b2013-10-19 16:25:36 -07001917 .sysctl_mem = sysctl_tcp_mem,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 .sysctl_wmem = sysctl_tcp_wmem,
1919 .sysctl_rmem = sysctl_tcp_rmem,
1920 .max_header = MAX_TCP_HEADER,
1921 .obj_size = sizeof(struct tcp6_sock),
Paul E. McKenney5f0d5a32017-01-18 02:53:44 -08001922 .slab_flags = SLAB_TYPESAFE_BY_RCU,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08001923 .twsk_prot = &tcp6_timewait_sock_ops,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001924 .rsk_prot = &tcp6_request_sock_ops,
Pavel Emelyanov39d8cda2008-03-22 16:50:58 -07001925 .h.hashinfo = &tcp_hashinfo,
Changli Gao7ba42912010-07-10 20:41:55 +00001926 .no_autobind = true,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001927#ifdef CONFIG_COMPAT
1928 .compat_setsockopt = compat_tcp_setsockopt,
1929 .compat_getsockopt = compat_tcp_getsockopt,
1930#endif
Lorenzo Colittic1e64e22015-12-16 12:30:05 +09001931 .diag_destroy = tcp_abort,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932};
1933
subashab@codeaurora.orgdddb64b2017-03-23 13:34:16 -06001934static struct inet6_protocol tcpv6_protocol = {
Eric Dumazetc7109982012-07-26 12:18:11 +00001935 .early_demux = tcp_v6_early_demux,
subashab@codeaurora.orgdddb64b2017-03-23 13:34:16 -06001936 .early_demux_handler = tcp_v6_early_demux,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 .handler = tcp_v6_rcv,
1938 .err_handler = tcp_v6_err,
1939 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1940};
1941
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942static struct inet_protosw tcpv6_protosw = {
1943 .type = SOCK_STREAM,
1944 .protocol = IPPROTO_TCP,
1945 .prot = &tcpv6_prot,
1946 .ops = &inet6_stream_ops,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001947 .flags = INET_PROTOSW_PERMANENT |
1948 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949};
1950
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001951static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001952{
Denis V. Lunev56772422008-04-03 14:28:30 -07001953 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1954 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001955}
1956
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001957static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001958{
Denis V. Lunev56772422008-04-03 14:28:30 -07001959 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001960}
1961
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001962static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001963{
Haishuang Yan1946e672016-12-28 17:52:32 +08001964 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001965}
1966
1967static struct pernet_operations tcpv6_net_ops = {
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001968 .init = tcpv6_net_init,
1969 .exit = tcpv6_net_exit,
1970 .exit_batch = tcpv6_net_exit_batch,
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001971};
1972
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001973int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001975 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08001976
Vlad Yasevich33362882012-11-15 08:49:15 +00001977 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1978 if (ret)
Vlad Yasevichc6b641a2012-11-15 08:49:22 +00001979 goto out;
Vlad Yasevich33362882012-11-15 08:49:15 +00001980
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001981 /* register inet6 protocol */
1982 ret = inet6_register_protosw(&tcpv6_protosw);
1983 if (ret)
1984 goto out_tcpv6_protocol;
1985
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001986 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001987 if (ret)
1988 goto out_tcpv6_protosw;
1989out:
1990 return ret;
1991
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001992out_tcpv6_protosw:
1993 inet6_unregister_protosw(&tcpv6_protosw);
Vlad Yasevich33362882012-11-15 08:49:15 +00001994out_tcpv6_protocol:
1995 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001996 goto out;
1997}
1998
Daniel Lezcano09f77092007-12-13 05:34:58 -08001999void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002000{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002001 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002002 inet6_unregister_protosw(&tcpv6_protosw);
2003 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004}