blob: 94f4f89d73e791ba2ae7bdc7e7ac5bd7bc66a8d4 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * TCP over IPv6
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09003 * Linux INET6 implementation
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 *
5 * Authors:
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09006 * Pedro Roque <roque@di.fc.ul.pt>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09008 * Based on:
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
Herbert Xueb4dea52008-12-29 23:04:08 -080026#include <linux/bottom_half.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090041#include <linux/slab.h>
Wang Yufen4aa956d2014-03-29 09:27:29 +080042#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
Arnaldo Carvalho de Melo5324a042005-08-12 09:26:18 -030049#include <net/inet6_hashtables.h>
Arnaldo Carvalho de Melo81297652005-12-13 23:15:24 -080050#include <net/inet6_connection_sock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070059#include <net/snmp.h>
60#include <net/dsfield.h>
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -080061#include <net/timewait_sock.h>
Denis V. Lunev3d58b5f2008-04-03 14:22:32 -070062#include <net/inet_common.h>
David S. Miller6e5714e2011-08-03 20:50:44 -070063#include <net/secure_seq.h>
Eliezer Tamir076bb0c2013-07-10 17:13:17 +030064#include <net/busy_poll.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070065
Linus Torvalds1da177e2005-04-16 15:20:36 -070066#include <linux/proc_fs.h>
67#include <linux/seq_file.h>
68
Herbert Xucf80e0e2016-01-24 21:20:23 +080069#include <crypto/hash.h>
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -080070#include <linux/scatterlist.h>
71
Eric Dumazeta00e7442015-09-29 07:42:39 -070072static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
Gui Jianfeng6edafaa2008-08-06 23:50:04 -070074 struct request_sock *req);
Linus Torvalds1da177e2005-04-16 15:20:36 -070075
76static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070077
Stephen Hemminger3b401a82009-09-01 19:25:04 +000078static const struct inet_connection_sock_af_ops ipv6_mapped;
79static const struct inet_connection_sock_af_ops ipv6_specific;
David S. Millera9286302006-11-14 19:53:22 -080080#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +000081static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +090083#else
Eric Dumazet51723932015-09-29 21:24:05 -070084static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
Eric Dumazetb71d1d42011-04-22 04:53:02 +000085 const struct in6_addr *addr)
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +090086{
87 return NULL;
88}
David S. Millera9286302006-11-14 19:53:22 -080089#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
Neal Cardwellfae6ef82012-08-19 03:30:38 +000091static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92{
93 struct dst_entry *dst = skb_dst(skb);
Neal Cardwellfae6ef82012-08-19 03:30:38 +000094
Eric Dumazet5037e9e2015-12-14 14:08:53 -080095 if (dst && dst_hold_safe(dst)) {
Eric Dumazetca777ef2014-09-08 08:06:07 -070096 const struct rt6_info *rt = (const struct rt6_info *)dst;
97
Eric Dumazetca777ef2014-09-08 08:06:07 -070098 sk->sk_rx_dst = dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
Martin KaFai Laub197df42015-05-22 20:56:01 -0700100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
Eric Dumazetca777ef2014-09-08 08:06:07 -0700101 }
Neal Cardwellfae6ef82012-08-19 03:30:38 +0000102}
103
Eric Dumazetcf533ea2011-10-21 05:22:42 -0400104static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105{
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -0700106 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -0700108 tcp_hdr(skb)->dest,
109 tcp_hdr(skb)->source);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110}
111
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113 int addr_len)
114{
115 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900116 struct inet_sock *inet = inet_sk(sk);
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -0800117 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118 struct ipv6_pinfo *np = inet6_sk(sk);
119 struct tcp_sock *tp = tcp_sk(sk);
Arnaud Ebalard20c59de2010-06-01 21:35:01 +0000120 struct in6_addr *saddr = NULL, *final_p, final;
Eric Dumazet45f6fad2015-11-29 19:37:57 -0800121 struct ipv6_txoptions *opt;
David S. Miller4c9483b2011-03-12 16:22:43 -0500122 struct flowi6 fl6;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123 struct dst_entry *dst;
124 int addr_type;
125 int err;
126
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900127 if (addr_len < SIN6_LEN_RFC2133)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128 return -EINVAL;
129
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900130 if (usin->sin6_family != AF_INET6)
Eric Dumazeta02cec22010-09-22 20:43:57 +0000131 return -EAFNOSUPPORT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700132
David S. Miller4c9483b2011-03-12 16:22:43 -0500133 memset(&fl6, 0, sizeof(fl6));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134
135 if (np->sndflow) {
David S. Miller4c9483b2011-03-12 16:22:43 -0500136 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
137 IP6_ECN_flow_init(fl6.flowlabel);
138 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139 struct ip6_flowlabel *flowlabel;
David S. Miller4c9483b2011-03-12 16:22:43 -0500140 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
Ian Morris63159f22015-03-29 14:00:04 +0100141 if (!flowlabel)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143 fl6_sock_release(flowlabel);
144 }
145 }
146
147 /*
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900148 * connect() to INADDR_ANY means loopback (BSD'ism).
149 */
150
Weilong Chen4c99aa42013-12-19 18:44:34 +0800151 if (ipv6_addr_any(&usin->sin6_addr))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900152 usin->sin6_addr.s6_addr[15] = 0x1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153
154 addr_type = ipv6_addr_type(&usin->sin6_addr);
155
Weilong Chen4c99aa42013-12-19 18:44:34 +0800156 if (addr_type & IPV6_ADDR_MULTICAST)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157 return -ENETUNREACH;
158
159 if (addr_type&IPV6_ADDR_LINKLOCAL) {
160 if (addr_len >= sizeof(struct sockaddr_in6) &&
161 usin->sin6_scope_id) {
162 /* If interface is set while binding, indices
163 * must coincide.
164 */
165 if (sk->sk_bound_dev_if &&
166 sk->sk_bound_dev_if != usin->sin6_scope_id)
167 return -EINVAL;
168
169 sk->sk_bound_dev_if = usin->sin6_scope_id;
170 }
171
172 /* Connect to link-local address requires an interface */
173 if (!sk->sk_bound_dev_if)
174 return -EINVAL;
175 }
176
177 if (tp->rx_opt.ts_recent_stamp &&
Eric Dumazetefe42082013-10-03 15:42:29 -0700178 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179 tp->rx_opt.ts_recent = 0;
180 tp->rx_opt.ts_recent_stamp = 0;
181 tp->write_seq = 0;
182 }
183
Eric Dumazetefe42082013-10-03 15:42:29 -0700184 sk->sk_v6_daddr = usin->sin6_addr;
David S. Miller4c9483b2011-03-12 16:22:43 -0500185 np->flow_label = fl6.flowlabel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186
187 /*
188 * TCP over IPv4
189 */
190
191 if (addr_type == IPV6_ADDR_MAPPED) {
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -0800192 u32 exthdrlen = icsk->icsk_ext_hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193 struct sockaddr_in sin;
194
195 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
196
197 if (__ipv6_only_sock(sk))
198 return -ENETUNREACH;
199
200 sin.sin_family = AF_INET;
201 sin.sin_port = usin->sin6_port;
202 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
203
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -0800204 icsk->icsk_af_ops = &ipv6_mapped;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205 sk->sk_backlog_rcv = tcp_v4_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800206#ifdef CONFIG_TCP_MD5SIG
207 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
208#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
211
212 if (err) {
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -0800213 icsk->icsk_ext_hdr_len = exthdrlen;
214 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215 sk->sk_backlog_rcv = tcp_v6_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800216#ifdef CONFIG_TCP_MD5SIG
217 tp->af_specific = &tcp_sock_ipv6_specific;
218#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219 goto failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220 }
Eric Dumazetd1e559d2015-03-18 14:05:35 -0700221 np->saddr = sk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700222
223 return err;
224 }
225
Eric Dumazetefe42082013-10-03 15:42:29 -0700226 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
227 saddr = &sk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228
David S. Miller4c9483b2011-03-12 16:22:43 -0500229 fl6.flowi6_proto = IPPROTO_TCP;
Eric Dumazetefe42082013-10-03 15:42:29 -0700230 fl6.daddr = sk->sk_v6_daddr;
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +0000231 fl6.saddr = saddr ? *saddr : np->saddr;
David S. Miller4c9483b2011-03-12 16:22:43 -0500232 fl6.flowi6_oif = sk->sk_bound_dev_if;
233 fl6.flowi6_mark = sk->sk_mark;
David S. Miller1958b852011-03-12 16:36:19 -0500234 fl6.fl6_dport = usin->sin6_port;
235 fl6.fl6_sport = inet->inet_sport;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236
Hannes Frederic Sowa1e1d04e2016-04-05 17:10:15 +0200237 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
Eric Dumazet45f6fad2015-11-29 19:37:57 -0800238 final_p = fl6_update_dst(&fl6, opt, &final);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239
David S. Miller4c9483b2011-03-12 16:22:43 -0500240 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
Venkat Yekkiralabeb8d132006-08-04 23:12:42 -0700241
Steffen Klassert0e0d44a2013-08-28 08:04:14 +0200242 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
David S. Miller68d0c6d2011-03-01 13:19:07 -0800243 if (IS_ERR(dst)) {
244 err = PTR_ERR(dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700245 goto failure;
David S. Miller14e50e52007-05-24 18:17:54 -0700246 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247
Ian Morris63159f22015-03-29 14:00:04 +0100248 if (!saddr) {
David S. Miller4c9483b2011-03-12 16:22:43 -0500249 saddr = &fl6.saddr;
Eric Dumazetefe42082013-10-03 15:42:29 -0700250 sk->sk_v6_rcv_saddr = *saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251 }
252
253 /* set the source address */
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +0000254 np->saddr = *saddr;
Eric Dumazetc720c7e2009-10-15 06:30:45 +0000255 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256
Herbert Xuf83ef8c2006-06-30 13:37:03 -0700257 sk->sk_gso_type = SKB_GSO_TCPV6;
Eric Dumazet6bd4f352015-12-02 21:53:57 -0800258 ip6_dst_store(sk, dst, NULL, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259
David S. Miller493f3772010-12-02 12:14:29 -0800260 if (tcp_death_row.sysctl_tw_recycle &&
261 !tp->rx_opt.ts_recent_stamp &&
Martin KaFai Laufd0273d2015-05-22 20:55:57 -0700262 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
David S. Miller81166dd2012-07-10 03:14:24 -0700263 tcp_fetch_timewait_stamp(sk, dst);
David S. Miller493f3772010-12-02 12:14:29 -0800264
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -0800265 icsk->icsk_ext_hdr_len = 0;
Eric Dumazet45f6fad2015-11-29 19:37:57 -0800266 if (opt)
267 icsk->icsk_ext_hdr_len = opt->opt_flen +
268 opt->opt_nflen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700269
270 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
271
Eric Dumazetc720c7e2009-10-15 06:30:45 +0000272 inet->inet_dport = usin->sin6_port;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700273
274 tcp_set_state(sk, TCP_SYN_SENT);
Arnaldo Carvalho de Melod8313f52005-12-13 23:25:44 -0800275 err = inet6_hash_connect(&tcp_death_row, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700276 if (err)
277 goto late_failure;
278
Tom Herbert877d1f62015-07-28 16:02:05 -0700279 sk_set_txhash(sk);
Sathya Perla9e7ceb02014-10-22 21:42:01 +0530280
Andrey Vagin2b916472012-11-22 01:13:58 +0000281 if (!tp->write_seq && likely(!tp->repair))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
Eric Dumazetefe42082013-10-03 15:42:29 -0700283 sk->sk_v6_daddr.s6_addr32,
Eric Dumazetc720c7e2009-10-15 06:30:45 +0000284 inet->inet_sport,
285 inet->inet_dport);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286
287 err = tcp_connect(sk);
288 if (err)
289 goto late_failure;
290
291 return 0;
292
293late_failure:
294 tcp_set_state(sk, TCP_CLOSE);
295 __sk_dst_reset(sk);
296failure:
Eric Dumazetc720c7e2009-10-15 06:30:45 +0000297 inet->inet_dport = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298 sk->sk_route_caps = 0;
299 return err;
300}
301
Eric Dumazet563d34d2012-07-23 09:48:52 +0200302static void tcp_v6_mtu_reduced(struct sock *sk)
303{
304 struct dst_entry *dst;
305
306 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
307 return;
308
309 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
310 if (!dst)
311 return;
312
313 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
314 tcp_sync_mss(sk, dst_mtu(dst));
315 tcp_simple_retransmit(sk);
316 }
317}
318
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
Brian Haleyd5fdd6b2009-06-23 04:31:07 -0700320 u8 type, u8 code, int offset, __be32 info)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321{
Weilong Chen4c99aa42013-12-19 18:44:34 +0800322 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
Arnaldo Carvalho de Melo505cbfc2005-08-12 09:19:38 -0300323 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
Eric Dumazet22150892015-03-22 10:22:23 -0700324 struct net *net = dev_net(skb->dev);
325 struct request_sock *fastopen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326 struct ipv6_pinfo *np;
Eric Dumazet22150892015-03-22 10:22:23 -0700327 struct tcp_sock *tp;
328 __u32 seq, snd_una;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329 struct sock *sk;
Eric Dumazet9cf74902016-02-02 19:31:12 -0800330 bool fatal;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700332
Eric Dumazet22150892015-03-22 10:22:23 -0700333 sk = __inet6_lookup_established(net, &tcp_hashinfo,
334 &hdr->daddr, th->dest,
335 &hdr->saddr, ntohs(th->source),
336 skb->dev->ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337
Eric Dumazet22150892015-03-22 10:22:23 -0700338 if (!sk) {
Eric Dumazeta16292a2016-04-27 16:44:36 -0700339 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
340 ICMP6_MIB_INERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341 return;
342 }
343
344 if (sk->sk_state == TCP_TIME_WAIT) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -0700345 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346 return;
347 }
Eric Dumazet22150892015-03-22 10:22:23 -0700348 seq = ntohl(th->seq);
Eric Dumazet9cf74902016-02-02 19:31:12 -0800349 fatal = icmpv6_err_convert(type, code, &err);
Eric Dumazet22150892015-03-22 10:22:23 -0700350 if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazet9cf74902016-02-02 19:31:12 -0800351 return tcp_req_err(sk, seq, fatal);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352
353 bh_lock_sock(sk);
Eric Dumazet563d34d2012-07-23 09:48:52 +0200354 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
Eric Dumazet02a1d6e2016-04-27 16:44:39 -0700355 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357 if (sk->sk_state == TCP_CLOSE)
358 goto out;
359
Stephen Hemmingere802af92010-04-22 15:24:53 -0700360 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -0700361 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingere802af92010-04-22 15:24:53 -0700362 goto out;
363 }
364
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365 tp = tcp_sk(sk);
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700366 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
367 fastopen = tp->fastopen_rsk;
368 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700369 if (sk->sk_state != TCP_LISTEN &&
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700370 !between(seq, snd_una, tp->snd_nxt)) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -0700371 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700372 goto out;
373 }
374
375 np = inet6_sk(sk);
376
David S. Millerec18d9a2012-07-12 00:25:15 -0700377 if (type == NDISC_REDIRECT) {
378 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
379
David S. Miller1ed5c482012-07-12 00:41:25 -0700380 if (dst)
David S. Miller6700c272012-07-17 03:29:28 -0700381 dst->ops->redirect(dst, sk, skb);
Christoph Paasch50a75a82013-04-07 04:53:15 +0000382 goto out;
David S. Millerec18d9a2012-07-12 00:25:15 -0700383 }
384
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385 if (type == ICMPV6_PKT_TOOBIG) {
Eric Dumazet0d4f0602013-03-18 07:01:28 +0000386 /* We are not interested in TCP_LISTEN and open_requests
387 * (SYN-ACKs send out by Linux are always <576bytes so
388 * they should go through unfragmented).
389 */
390 if (sk->sk_state == TCP_LISTEN)
391 goto out;
392
Hannes Frederic Sowa93b36cf2013-12-15 03:41:14 +0100393 if (!ip6_sk_accept_pmtu(sk))
394 goto out;
395
Eric Dumazet563d34d2012-07-23 09:48:52 +0200396 tp->mtu_info = ntohl(info);
397 if (!sock_owned_by_user(sk))
398 tcp_v6_mtu_reduced(sk);
Julian Anastasovd013ef2a2012-09-05 10:53:18 +0000399 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
400 &tp->tsq_flags))
401 sock_hold(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402 goto out;
403 }
404
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -0700406 /* Might be for an request_sock */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 switch (sk->sk_state) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408 case TCP_SYN_SENT:
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700409 case TCP_SYN_RECV:
410 /* Only in fast or simultaneous open. If a fast open socket is
411 * is already accepted it is treated as a connected one below.
412 */
Ian Morris63159f22015-03-29 14:00:04 +0100413 if (fastopen && !fastopen->sk)
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700414 break;
415
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416 if (!sock_owned_by_user(sk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417 sk->sk_err = err;
418 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
419
420 tcp_done(sk);
421 } else
422 sk->sk_err_soft = err;
423 goto out;
424 }
425
426 if (!sock_owned_by_user(sk) && np->recverr) {
427 sk->sk_err = err;
428 sk->sk_error_report(sk);
429 } else
430 sk->sk_err_soft = err;
431
432out:
433 bh_unlock_sock(sk);
434 sock_put(sk);
435}
436
437
Eric Dumazet0f935dbe2015-09-25 07:39:21 -0700438static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
Octavian Purdilad6274bd2014-06-25 17:09:58 +0300439 struct flowi *fl,
Neal Cardwell3840a062012-06-28 12:34:19 +0000440 struct request_sock *req,
Eric Dumazetca6fb062015-10-02 11:43:35 -0700441 struct tcp_fastopen_cookie *foc,
Eric Dumazetb3d05142016-04-13 22:05:39 -0700442 enum tcp_synack_type synack_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700443{
Eric Dumazet634fb9792013-10-09 15:21:29 -0700444 struct inet_request_sock *ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700445 struct ipv6_pinfo *np = inet6_sk(sk);
Huw Davies56ac42b2016-06-27 15:05:28 -0400446 struct ipv6_txoptions *opt;
Octavian Purdilad6274bd2014-06-25 17:09:58 +0300447 struct flowi6 *fl6 = &fl->u.ip6;
Weilong Chen4c99aa42013-12-19 18:44:34 +0800448 struct sk_buff *skb;
Neal Cardwell94942182012-06-28 12:34:20 +0000449 int err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700450
Neal Cardwell9f10d3f2012-06-28 12:34:21 +0000451 /* First, grab a route. */
Eric Dumazetf76b33c2015-09-29 07:42:42 -0700452 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
453 IPPROTO_TCP)) == NULL)
Denis V. Lunevfd80eb92008-02-29 11:43:03 -0800454 goto done;
Neal Cardwell94942182012-06-28 12:34:20 +0000455
Eric Dumazetb3d05142016-04-13 22:05:39 -0700456 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
Neal Cardwell94942182012-06-28 12:34:20 +0000457
Linus Torvalds1da177e2005-04-16 15:20:36 -0700458 if (skb) {
Eric Dumazet634fb9792013-10-09 15:21:29 -0700459 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
460 &ireq->ir_v6_rmt_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700461
Eric Dumazet634fb9792013-10-09 15:21:29 -0700462 fl6->daddr = ireq->ir_v6_rmt_addr;
Ian Morris53b24b82015-03-29 14:00:05 +0100463 if (np->repflow && ireq->pktopts)
Florent Fourcotdf3687f2014-01-17 17:15:03 +0100464 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
465
Eric Dumazet3e4006f2016-01-08 09:35:51 -0800466 rcu_read_lock();
Huw Davies56ac42b2016-06-27 15:05:28 -0400467 opt = ireq->ipv6_opt;
468 if (!opt)
469 opt = rcu_dereference(np->opt);
470 err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
Eric Dumazet3e4006f2016-01-08 09:35:51 -0800471 rcu_read_unlock();
Gerrit Renkerb9df3cb2006-11-14 11:21:36 -0200472 err = net_xmit_eval(err);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473 }
474
475done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476 return err;
477}
478
Octavian Purdila72659ec2010-01-17 19:09:39 -0800479
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -0700480static void tcp_v6_reqsk_destructor(struct request_sock *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700481{
Huw Davies56ac42b2016-06-27 15:05:28 -0400482 kfree(inet_rsk(req)->ipv6_opt);
Eric Dumazet634fb9792013-10-09 15:21:29 -0700483 kfree_skb(inet_rsk(req)->pktopts);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484}
485
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800486#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetb83e3de2015-09-25 07:39:15 -0700487static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000488 const struct in6_addr *addr)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800489{
Eric Dumazeta915da9b2012-01-31 05:18:33 +0000490 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800491}
492
Eric Dumazetb83e3de2015-09-25 07:39:15 -0700493static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
Eric Dumazetfd3a1542015-03-24 15:58:56 -0700494 const struct sock *addr_sk)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800495{
Eric Dumazetefe42082013-10-03 15:42:29 -0700496 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800497}
498
Wang Yufen4aa956d2014-03-29 09:27:29 +0800499static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
500 int optlen)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800501{
502 struct tcp_md5sig cmd;
503 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800504
505 if (optlen < sizeof(cmd))
506 return -EINVAL;
507
508 if (copy_from_user(&cmd, optval, sizeof(cmd)))
509 return -EFAULT;
510
511 if (sin6->sin6_family != AF_INET6)
512 return -EINVAL;
513
514 if (!cmd.tcpm_keylen) {
Brian Haleye773e4f2007-08-24 23:16:08 -0700515 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
Eric Dumazeta915da9b2012-01-31 05:18:33 +0000516 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
517 AF_INET);
518 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
519 AF_INET6);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800520 }
521
522 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
523 return -EINVAL;
524
Eric Dumazeta915da9b2012-01-31 05:18:33 +0000525 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
526 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
527 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800528
Eric Dumazeta915da9b2012-01-31 05:18:33 +0000529 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
530 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800531}
532
Eric Dumazet19689e32016-06-27 18:51:53 +0200533static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
534 const struct in6_addr *daddr,
535 const struct in6_addr *saddr,
536 const struct tcphdr *th, int nbytes)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800537{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800538 struct tcp6_pseudohdr *bp;
Adam Langley49a72df2008-07-19 00:01:42 -0700539 struct scatterlist sg;
Eric Dumazet19689e32016-06-27 18:51:53 +0200540 struct tcphdr *_th;
YOSHIFUJI Hideaki8d26d762008-04-17 13:19:16 +0900541
Eric Dumazet19689e32016-06-27 18:51:53 +0200542 bp = hp->scratch;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800543 /* 1. TCP pseudo-header (RFC2460) */
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +0000544 bp->saddr = *saddr;
545 bp->daddr = *daddr;
Adam Langley49a72df2008-07-19 00:01:42 -0700546 bp->protocol = cpu_to_be32(IPPROTO_TCP);
Adam Langley00b13042008-07-31 21:36:07 -0700547 bp->len = cpu_to_be32(nbytes);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800548
Eric Dumazet19689e32016-06-27 18:51:53 +0200549 _th = (struct tcphdr *)(bp + 1);
550 memcpy(_th, th, sizeof(*th));
551 _th->check = 0;
552
553 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
554 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
555 sizeof(*bp) + sizeof(*th));
Herbert Xucf80e0e2016-01-24 21:20:23 +0800556 return crypto_ahash_update(hp->md5_req);
Adam Langley49a72df2008-07-19 00:01:42 -0700557}
David S. Millerc7da57a2007-10-26 00:41:21 -0700558
Eric Dumazet19689e32016-06-27 18:51:53 +0200559static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000560 const struct in6_addr *daddr, struct in6_addr *saddr,
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400561 const struct tcphdr *th)
Adam Langley49a72df2008-07-19 00:01:42 -0700562{
563 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800564 struct ahash_request *req;
Adam Langley49a72df2008-07-19 00:01:42 -0700565
566 hp = tcp_get_md5sig_pool();
567 if (!hp)
568 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800569 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -0700570
Herbert Xucf80e0e2016-01-24 21:20:23 +0800571 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -0700572 goto clear_hash;
Eric Dumazet19689e32016-06-27 18:51:53 +0200573 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
Adam Langley49a72df2008-07-19 00:01:42 -0700574 goto clear_hash;
575 if (tcp_md5_hash_key(hp, key))
576 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800577 ahash_request_set_crypt(req, NULL, md5_hash, 0);
578 if (crypto_ahash_final(req))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800579 goto clear_hash;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800580
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800581 tcp_put_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800582 return 0;
Adam Langley49a72df2008-07-19 00:01:42 -0700583
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800584clear_hash:
585 tcp_put_md5sig_pool();
586clear_hash_noput:
587 memset(md5_hash, 0, 16);
Adam Langley49a72df2008-07-19 00:01:42 -0700588 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800589}
590
Eric Dumazet39f8e582015-03-24 15:58:55 -0700591static int tcp_v6_md5_hash_skb(char *md5_hash,
592 const struct tcp_md5sig_key *key,
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400593 const struct sock *sk,
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400594 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800595{
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000596 const struct in6_addr *saddr, *daddr;
Adam Langley49a72df2008-07-19 00:01:42 -0700597 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800598 struct ahash_request *req;
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400599 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800600
Eric Dumazet39f8e582015-03-24 15:58:55 -0700601 if (sk) { /* valid for establish/request sockets */
602 saddr = &sk->sk_v6_rcv_saddr;
Eric Dumazetefe42082013-10-03 15:42:29 -0700603 daddr = &sk->sk_v6_daddr;
Adam Langley49a72df2008-07-19 00:01:42 -0700604 } else {
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000605 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
Adam Langley49a72df2008-07-19 00:01:42 -0700606 saddr = &ip6h->saddr;
607 daddr = &ip6h->daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800608 }
Adam Langley49a72df2008-07-19 00:01:42 -0700609
610 hp = tcp_get_md5sig_pool();
611 if (!hp)
612 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800613 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -0700614
Herbert Xucf80e0e2016-01-24 21:20:23 +0800615 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -0700616 goto clear_hash;
617
Eric Dumazet19689e32016-06-27 18:51:53 +0200618 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
Adam Langley49a72df2008-07-19 00:01:42 -0700619 goto clear_hash;
620 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
621 goto clear_hash;
622 if (tcp_md5_hash_key(hp, key))
623 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800624 ahash_request_set_crypt(req, NULL, md5_hash, 0);
625 if (crypto_ahash_final(req))
Adam Langley49a72df2008-07-19 00:01:42 -0700626 goto clear_hash;
627
628 tcp_put_md5sig_pool();
629 return 0;
630
631clear_hash:
632 tcp_put_md5sig_pool();
633clear_hash_noput:
634 memset(md5_hash, 0, 16);
635 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800636}
637
Eric Dumazetba8e2752015-10-02 11:43:28 -0700638#endif
639
640static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
641 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800642{
Eric Dumazetba8e2752015-10-02 11:43:28 -0700643#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetcf533ea2011-10-21 05:22:42 -0400644 const __u8 *hash_location = NULL;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800645 struct tcp_md5sig_key *hash_expected;
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000646 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400647 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800648 int genhash;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800649 u8 newhash[16];
650
651 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
YOSHIFUJI Hideaki7d5d5522008-04-17 12:29:53 +0900652 hash_location = tcp_parse_md5sig_option(th);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800653
David S. Miller785957d2008-07-30 03:03:15 -0700654 /* We've parsed the options - do we have a hash? */
655 if (!hash_expected && !hash_location)
Eric Dumazetff74e232015-03-24 15:58:54 -0700656 return false;
David S. Miller785957d2008-07-30 03:03:15 -0700657
658 if (hash_expected && !hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -0700659 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
Eric Dumazetff74e232015-03-24 15:58:54 -0700660 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800661 }
662
David S. Miller785957d2008-07-30 03:03:15 -0700663 if (!hash_expected && hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -0700664 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
Eric Dumazetff74e232015-03-24 15:58:54 -0700665 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800666 }
667
668 /* check the signature */
Adam Langley49a72df2008-07-19 00:01:42 -0700669 genhash = tcp_v6_md5_hash_skb(newhash,
670 hash_expected,
Eric Dumazet39f8e582015-03-24 15:58:55 -0700671 NULL, skb);
Adam Langley49a72df2008-07-19 00:01:42 -0700672
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800673 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
Joe Perchese87cc472012-05-13 21:56:26 +0000674 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
675 genhash ? "failed" : "mismatch",
676 &ip6h->saddr, ntohs(th->source),
677 &ip6h->daddr, ntohs(th->dest));
Eric Dumazetff74e232015-03-24 15:58:54 -0700678 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800679 }
Eric Dumazetba8e2752015-10-02 11:43:28 -0700680#endif
Eric Dumazetff74e232015-03-24 15:58:54 -0700681 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800682}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800683
Eric Dumazetb40cf182015-09-25 07:39:08 -0700684static void tcp_v6_init_req(struct request_sock *req,
685 const struct sock *sk_listener,
Octavian Purdila16bea702014-06-25 17:09:53 +0300686 struct sk_buff *skb)
687{
688 struct inet_request_sock *ireq = inet_rsk(req);
Eric Dumazetb40cf182015-09-25 07:39:08 -0700689 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
Octavian Purdila16bea702014-06-25 17:09:53 +0300690
691 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
692 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
693
Octavian Purdila16bea702014-06-25 17:09:53 +0300694 /* So that link locals have meaning */
Eric Dumazetb40cf182015-09-25 07:39:08 -0700695 if (!sk_listener->sk_bound_dev_if &&
Octavian Purdila16bea702014-06-25 17:09:53 +0300696 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
Eric Dumazet870c3152014-10-17 09:17:20 -0700697 ireq->ir_iif = tcp_v6_iif(skb);
Octavian Purdila16bea702014-06-25 17:09:53 +0300698
Eric Dumazet04317da2014-09-05 15:33:32 -0700699 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
Eric Dumazetb40cf182015-09-25 07:39:08 -0700700 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
Eric Dumazeta2247722014-09-27 09:50:56 -0700701 np->rxopt.bits.rxinfo ||
Octavian Purdila16bea702014-06-25 17:09:53 +0300702 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
703 np->rxopt.bits.rxohlim || np->repflow)) {
704 atomic_inc(&skb->users);
705 ireq->pktopts = skb;
706 }
707}
708
Eric Dumazetf9646292015-09-29 07:42:50 -0700709static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
710 struct flowi *fl,
Octavian Purdilad94e0412014-06-25 17:09:55 +0300711 const struct request_sock *req,
712 bool *strict)
713{
714 if (strict)
715 *strict = true;
Eric Dumazetf76b33c2015-09-29 07:42:42 -0700716 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
Octavian Purdilad94e0412014-06-25 17:09:55 +0300717}
718
Glenn Griffinc6aefaf2008-02-07 21:49:26 -0800719struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700720 .family = AF_INET6,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -0700721 .obj_size = sizeof(struct tcp6_request_sock),
Octavian Purdila5db92c92014-06-25 17:09:59 +0300722 .rtx_syn_ack = tcp_rtx_synack,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -0700723 .send_ack = tcp_v6_reqsk_send_ack,
724 .destructor = tcp_v6_reqsk_destructor,
Octavian Purdila72659ec2010-01-17 19:09:39 -0800725 .send_reset = tcp_v6_send_reset,
Wang Yufen4aa956d2014-03-29 09:27:29 +0800726 .syn_ack_timeout = tcp_syn_ack_timeout,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727};
728
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +0000729static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
Octavian Purdila2aec4a22014-06-25 17:10:00 +0300730 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
731 sizeof(struct ipv6hdr),
Octavian Purdila16bea702014-06-25 17:09:53 +0300732#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetfd3a1542015-03-24 15:58:56 -0700733 .req_md5_lookup = tcp_v6_md5_lookup,
John Dykstrae3afe7b2009-07-16 05:04:51 +0000734 .calc_md5_hash = tcp_v6_md5_hash_skb,
Andrew Mortonb6332e62006-11-30 19:16:28 -0800735#endif
Octavian Purdila16bea702014-06-25 17:09:53 +0300736 .init_req = tcp_v6_init_req,
Octavian Purdilafb7b37a2014-06-25 17:09:54 +0300737#ifdef CONFIG_SYN_COOKIES
738 .cookie_init_seq = cookie_v6_init_sequence,
739#endif
Octavian Purdilad94e0412014-06-25 17:09:55 +0300740 .route_req = tcp_v6_route_req,
Octavian Purdila936b8bd2014-06-25 17:09:57 +0300741 .init_seq = tcp_v6_init_sequence,
Octavian Purdilad6274bd2014-06-25 17:09:58 +0300742 .send_synack = tcp_v6_send_synack,
Octavian Purdila16bea702014-06-25 17:09:53 +0300743};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800744
Eric Dumazeta00e7442015-09-29 07:42:39 -0700745static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800746 u32 ack, u32 win, u32 tsval, u32 tsecr,
747 int oif, struct tcp_md5sig_key *key, int rst,
Hannes Frederic Sowa5119bd12016-06-11 20:41:38 +0200748 u8 tclass, __be32 label)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749{
Eric Dumazetcf533ea2011-10-21 05:22:42 -0400750 const struct tcphdr *th = tcp_hdr(skb);
751 struct tcphdr *t1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700752 struct sk_buff *buff;
David S. Miller4c9483b2011-03-12 16:22:43 -0500753 struct flowi6 fl6;
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800754 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
Daniel Lezcanoe5047992008-03-07 11:16:26 -0800755 struct sock *ctl_sk = net->ipv6.tcp_sk;
YOSHIFUJI Hideaki9cb57342008-01-12 02:16:03 -0800756 unsigned int tot_len = sizeof(struct tcphdr);
Eric Dumazetadf30902009-06-02 05:19:30 +0000757 struct dst_entry *dst;
Al Viroe69a4adc2006-11-14 20:56:00 -0800758 __be32 *topt;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759
Andrey Vaginee684b62013-02-11 05:50:19 +0000760 if (tsecr)
YOSHIFUJI Hideaki4244f8a2006-10-10 19:40:50 -0700761 tot_len += TCPOLEN_TSTAMP_ALIGNED;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800762#ifdef CONFIG_TCP_MD5SIG
763 if (key)
764 tot_len += TCPOLEN_MD5SIG_ALIGNED;
765#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700766
767 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
768 GFP_ATOMIC);
Ian Morris63159f22015-03-29 14:00:04 +0100769 if (!buff)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770 return;
771
772 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
773
Ilpo Järvinen77c676d2008-10-09 14:41:38 -0700774 t1 = (struct tcphdr *) skb_push(buff, tot_len);
Herbert Xu6651ffc2010-04-21 00:47:15 -0700775 skb_reset_transport_header(buff);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700776
777 /* Swap the send and the receive. */
778 memset(t1, 0, sizeof(*t1));
779 t1->dest = th->source;
780 t1->source = th->dest;
Ilpo Järvinen77c676d2008-10-09 14:41:38 -0700781 t1->doff = tot_len / 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700782 t1->seq = htonl(seq);
783 t1->ack_seq = htonl(ack);
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700784 t1->ack = !rst || !th->ack;
785 t1->rst = rst;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700786 t1->window = htons(win);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800787
Al Viroe69a4adc2006-11-14 20:56:00 -0800788 topt = (__be32 *)(t1 + 1);
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900789
Andrey Vaginee684b62013-02-11 05:50:19 +0000790 if (tsecr) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800791 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
792 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
Andrey Vaginee684b62013-02-11 05:50:19 +0000793 *topt++ = htonl(tsval);
794 *topt++ = htonl(tsecr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795 }
796
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800797#ifdef CONFIG_TCP_MD5SIG
798 if (key) {
799 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
800 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
Adam Langley49a72df2008-07-19 00:01:42 -0700801 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
Adam Langley90b7e112008-07-31 20:49:48 -0700802 &ipv6_hdr(skb)->saddr,
803 &ipv6_hdr(skb)->daddr, t1);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800804 }
805#endif
806
David S. Miller4c9483b2011-03-12 16:22:43 -0500807 memset(&fl6, 0, sizeof(fl6));
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +0000808 fl6.daddr = ipv6_hdr(skb)->saddr;
809 fl6.saddr = ipv6_hdr(skb)->daddr;
Florent Fourcot1d13a962014-01-16 17:21:22 +0100810 fl6.flowlabel = label;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700811
David S. Millere5700af2010-04-21 14:59:20 -0700812 buff->ip_summed = CHECKSUM_PARTIAL;
813 buff->csum = 0;
814
David S. Miller4c9483b2011-03-12 16:22:43 -0500815 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816
David S. Miller4c9483b2011-03-12 16:22:43 -0500817 fl6.flowi6_proto = IPPROTO_TCP;
Lorenzo Colittia36dbdb2014-04-11 13:19:12 +0900818 if (rt6_need_strict(&fl6.daddr) && !oif)
Eric Dumazet870c3152014-10-17 09:17:20 -0700819 fl6.flowi6_oif = tcp_v6_iif(skb);
David Ahern1d2f7b22016-05-04 21:26:08 -0700820 else {
821 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
822 oif = skb->skb_iif;
823
Wang Yufen9c76a112014-03-29 09:27:31 +0800824 fl6.flowi6_oif = oif;
David Ahern1d2f7b22016-05-04 21:26:08 -0700825 }
826
Lorenzo Colittie1108612014-05-13 10:17:33 -0700827 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
David S. Miller1958b852011-03-12 16:36:19 -0500828 fl6.fl6_dport = t1->dest;
829 fl6.fl6_sport = t1->source;
David S. Miller4c9483b2011-03-12 16:22:43 -0500830 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700832 /* Pass a socket to ip6_dst_lookup either it is for RST
833 * Underlying function will use this to retrieve the network
834 * namespace
835 */
Steffen Klassert0e0d44a2013-08-28 08:04:14 +0200836 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
David S. Miller68d0c6d2011-03-01 13:19:07 -0800837 if (!IS_ERR(dst)) {
838 skb_dst_set(buff, dst);
Eric Dumazetb903d322011-10-27 00:44:35 -0400839 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
Eric Dumazetc10d9312016-04-29 14:16:47 -0700840 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
David S. Miller68d0c6d2011-03-01 13:19:07 -0800841 if (rst)
Eric Dumazetc10d9312016-04-29 14:16:47 -0700842 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
David S. Miller68d0c6d2011-03-01 13:19:07 -0800843 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700844 }
845
846 kfree_skb(buff);
847}
848
Eric Dumazeta00e7442015-09-29 07:42:39 -0700849static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700850{
Eric Dumazetcf533ea2011-10-21 05:22:42 -0400851 const struct tcphdr *th = tcp_hdr(skb);
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700852 u32 seq = 0, ack_seq = 0;
Guo-Fu Tsengfa3e5b42008-10-09 21:11:56 -0700853 struct tcp_md5sig_key *key = NULL;
Shawn Lu658ddaa2012-01-31 22:35:48 +0000854#ifdef CONFIG_TCP_MD5SIG
855 const __u8 *hash_location = NULL;
856 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
857 unsigned char newhash[16];
858 int genhash;
859 struct sock *sk1 = NULL;
860#endif
Wang Yufen9c76a112014-03-29 09:27:31 +0800861 int oif;
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700862
863 if (th->rst)
864 return;
865
Eric Dumazetc3658e82014-11-25 07:40:04 -0800866 /* If sk not NULL, it means we did a successful lookup and incoming
867 * route had to be correct. prequeue might have dropped our dst.
868 */
869 if (!sk && !ipv6_unicast_destination(skb))
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700870 return;
871
872#ifdef CONFIG_TCP_MD5SIG
Eric Dumazet3b24d852016-04-01 08:52:17 -0700873 rcu_read_lock();
Shawn Lu658ddaa2012-01-31 22:35:48 +0000874 hash_location = tcp_parse_md5sig_option(th);
Florian Westphal271c3b92015-12-21 21:29:26 +0100875 if (sk && sk_fullsock(sk)) {
Florian Westphale46787f2015-12-21 21:29:25 +0100876 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
877 } else if (hash_location) {
Shawn Lu658ddaa2012-01-31 22:35:48 +0000878 /*
879 * active side is lost. Try to find listening socket through
880 * source port, and then find md5 key through listening socket.
881 * we are not loose security here:
882 * Incoming packet is checked with md5 hash with finding key,
883 * no RST generated if md5 hash doesn't match.
884 */
885 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
Craig Galleka5836362016-02-10 11:50:38 -0500886 &tcp_hashinfo, NULL, 0,
887 &ipv6h->saddr,
Tom Herbert5ba24952013-01-22 09:50:39 +0000888 th->source, &ipv6h->daddr,
Eric Dumazet870c3152014-10-17 09:17:20 -0700889 ntohs(th->source), tcp_v6_iif(skb));
Shawn Lu658ddaa2012-01-31 22:35:48 +0000890 if (!sk1)
Eric Dumazet3b24d852016-04-01 08:52:17 -0700891 goto out;
Shawn Lu658ddaa2012-01-31 22:35:48 +0000892
Shawn Lu658ddaa2012-01-31 22:35:48 +0000893 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
894 if (!key)
Eric Dumazet3b24d852016-04-01 08:52:17 -0700895 goto out;
Shawn Lu658ddaa2012-01-31 22:35:48 +0000896
Eric Dumazet39f8e582015-03-24 15:58:55 -0700897 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
Shawn Lu658ddaa2012-01-31 22:35:48 +0000898 if (genhash || memcmp(hash_location, newhash, 16) != 0)
Eric Dumazet3b24d852016-04-01 08:52:17 -0700899 goto out;
Shawn Lu658ddaa2012-01-31 22:35:48 +0000900 }
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700901#endif
902
903 if (th->ack)
904 seq = ntohl(th->ack_seq);
905 else
906 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
907 (th->doff << 2);
908
Wang Yufen9c76a112014-03-29 09:27:31 +0800909 oif = sk ? sk->sk_bound_dev_if : 0;
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800910 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
Shawn Lu658ddaa2012-01-31 22:35:48 +0000911
912#ifdef CONFIG_TCP_MD5SIG
Eric Dumazet3b24d852016-04-01 08:52:17 -0700913out:
914 rcu_read_unlock();
Shawn Lu658ddaa2012-01-31 22:35:48 +0000915#endif
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700916}
917
Eric Dumazeta00e7442015-09-29 07:42:39 -0700918static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800919 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
Florent Fourcot1d13a962014-01-16 17:21:22 +0100920 struct tcp_md5sig_key *key, u8 tclass,
Hannes Frederic Sowa5119bd12016-06-11 20:41:38 +0200921 __be32 label)
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700922{
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800923 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
924 tclass, label);
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700925}
926
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
928{
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -0700929 struct inet_timewait_sock *tw = inet_twsk(sk);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800930 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700931
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800932 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -0700933 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
Andrey Vaginee684b62013-02-11 05:50:19 +0000934 tcp_time_stamp + tcptw->tw_ts_offset,
Wang Yufen9c76a112014-03-29 09:27:31 +0800935 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
Florent Fourcot21858cd2015-05-16 00:24:59 +0200936 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -0700938 inet_twsk_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700939}
940
Eric Dumazeta00e7442015-09-29 07:42:39 -0700941static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
Gui Jianfeng6edafaa2008-08-06 23:50:04 -0700942 struct request_sock *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943{
Daniel Lee3a19ce02014-05-11 20:22:13 -0700944 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
945 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
946 */
Eric Dumazet20a2b492016-08-22 11:31:10 -0700947 /* RFC 7323 2.3
948 * The window field (SEG.WND) of every outgoing segment, with the
949 * exception of <SYN> segments, MUST be right-shifted by
950 * Rcv.Wind.Shift bits:
951 */
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800952 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
Daniel Lee3a19ce02014-05-11 20:22:13 -0700953 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
Eric Dumazet20a2b492016-08-22 11:31:10 -0700954 tcp_rsk(req)->rcv_nxt,
955 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800956 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
Florent Fourcot1d13a962014-01-16 17:21:22 +0100957 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
958 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959}
960
961
Eric Dumazet079096f2015-10-02 11:43:32 -0700962static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963{
Glenn Griffinc6aefaf2008-02-07 21:49:26 -0800964#ifdef CONFIG_SYN_COOKIES
Eric Dumazet079096f2015-10-02 11:43:32 -0700965 const struct tcphdr *th = tcp_hdr(skb);
966
Florian Westphalaf9b4732010-06-03 00:43:44 +0000967 if (!th->syn)
Glenn Griffinc6aefaf2008-02-07 21:49:26 -0800968 sk = cookie_v6_check(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700969#endif
970 return sk;
971}
972
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
974{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975 if (skb->protocol == htons(ETH_P_IP))
976 return tcp_v4_conn_request(sk, skb);
977
978 if (!ipv6_unicast_destination(skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900979 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980
Octavian Purdila1fb6f152014-06-25 17:10:02 +0300981 return tcp_conn_request(&tcp6_request_sock_ops,
982 &tcp_request_sock_ipv6_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984drop:
Eric Dumazet9caad862016-04-01 08:52:20 -0700985 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 return 0; /* don't send reset */
987}
988
Eric Dumazet0c271712015-09-29 07:42:48 -0700989static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Weilong Chen4c99aa42013-12-19 18:44:34 +0800990 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -0700991 struct dst_entry *dst,
992 struct request_sock *req_unhash,
993 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994{
Eric Dumazet634fb9792013-10-09 15:21:29 -0700995 struct inet_request_sock *ireq;
Eric Dumazet0c271712015-09-29 07:42:48 -0700996 struct ipv6_pinfo *newnp;
997 const struct ipv6_pinfo *np = inet6_sk(sk);
Eric Dumazet45f6fad2015-11-29 19:37:57 -0800998 struct ipv6_txoptions *opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999 struct tcp6_sock *newtcp6sk;
1000 struct inet_sock *newinet;
1001 struct tcp_sock *newtp;
1002 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001003#ifdef CONFIG_TCP_MD5SIG
1004 struct tcp_md5sig_key *key;
1005#endif
Neal Cardwell3840a062012-06-28 12:34:19 +00001006 struct flowi6 fl6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001007
1008 if (skb->protocol == htons(ETH_P_IP)) {
1009 /*
1010 * v6 mapped
1011 */
1012
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001013 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1014 req_unhash, own_req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015
Ian Morris63159f22015-03-29 14:00:04 +01001016 if (!newsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017 return NULL;
1018
1019 newtcp6sk = (struct tcp6_sock *)newsk;
1020 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1021
1022 newinet = inet_sk(newsk);
1023 newnp = inet6_sk(newsk);
1024 newtp = tcp_sk(newsk);
1025
1026 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1027
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001028 newnp->saddr = newsk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001030 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001031 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001032#ifdef CONFIG_TCP_MD5SIG
1033 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1034#endif
1035
Yan, Zheng676a1182011-09-25 02:21:30 +00001036 newnp->ipv6_ac_list = NULL;
1037 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038 newnp->pktoptions = NULL;
1039 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001040 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001041 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001042 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001043 if (np->repflow)
1044 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001046 /*
1047 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1048 * here, tcp_create_openreq_child now does this for us, see the comment in
1049 * that function for the gory details. -acme
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051
1052 /* It is tricky place. Until this moment IPv4 tcp
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001053 worked with IPv6 icsk.icsk_af_ops.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 Sync it now.
1055 */
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001056 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057
1058 return newsk;
1059 }
1060
Eric Dumazet634fb9792013-10-09 15:21:29 -07001061 ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001062
1063 if (sk_acceptq_is_full(sk))
1064 goto out_overflow;
1065
David S. Miller493f3772010-12-02 12:14:29 -08001066 if (!dst) {
Eric Dumazetf76b33c2015-09-29 07:42:42 -07001067 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
David S. Miller493f3772010-12-02 12:14:29 -08001068 if (!dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069 goto out;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001070 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071
1072 newsk = tcp_create_openreq_child(sk, req, skb);
Ian Morris63159f22015-03-29 14:00:04 +01001073 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001074 goto out_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001076 /*
1077 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1078 * count here, tcp_create_openreq_child now does this for us, see the
1079 * comment in that function for the gory details. -acme
1080 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081
Stephen Hemminger59eed272006-08-25 15:55:43 -07001082 newsk->sk_gso_type = SKB_GSO_TCPV6;
Eric Dumazet6bd4f352015-12-02 21:53:57 -08001083 ip6_dst_store(newsk, dst, NULL, NULL);
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001084 inet6_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085
1086 newtcp6sk = (struct tcp6_sock *)newsk;
1087 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1088
1089 newtp = tcp_sk(newsk);
1090 newinet = inet_sk(newsk);
1091 newnp = inet6_sk(newsk);
1092
1093 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1094
Eric Dumazet634fb9792013-10-09 15:21:29 -07001095 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1096 newnp->saddr = ireq->ir_v6_loc_addr;
1097 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1098 newsk->sk_bound_dev_if = ireq->ir_iif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001100 /* Now IPv6 options...
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101
1102 First: no IPv4 options.
1103 */
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001104 newinet->inet_opt = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001105 newnp->ipv6_ac_list = NULL;
Masayuki Nakagawad35690b2007-03-16 16:14:03 -07001106 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001107
1108 /* Clone RX bits */
1109 newnp->rxopt.all = np->rxopt.all;
1110
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111 newnp->pktoptions = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001113 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001114 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001115 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001116 if (np->repflow)
1117 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118
1119 /* Clone native IPv6 options from listening socket (if any)
1120
1121 Yes, keeping reference count would be much more clever,
1122 but we make one more one thing there: reattach optmem
1123 to newsk.
1124 */
Huw Davies56ac42b2016-06-27 15:05:28 -04001125 opt = ireq->ipv6_opt;
1126 if (!opt)
1127 opt = rcu_dereference(np->opt);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001128 if (opt) {
1129 opt = ipv6_dup_options(newsk, opt);
1130 RCU_INIT_POINTER(newnp->opt, opt);
1131 }
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001132 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001133 if (opt)
1134 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1135 opt->opt_flen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136
Daniel Borkmann81164412015-01-05 23:57:48 +01001137 tcp_ca_openreq_child(newsk, dst);
1138
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139 tcp_sync_mss(newsk, dst_mtu(dst));
David S. Miller0dbaee32010-12-13 12:52:14 -08001140 newtp->advmss = dst_metric_advmss(dst);
Neal Cardwelld135c522012-04-22 09:45:47 +00001141 if (tcp_sk(sk)->rx_opt.user_mss &&
1142 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1143 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1144
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145 tcp_initialize_rcv_mss(newsk);
1146
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001147 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1148 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001150#ifdef CONFIG_TCP_MD5SIG
1151 /* Copy over the MD5 key from the original socket */
Wang Yufen4aa956d2014-03-29 09:27:29 +08001152 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
Ian Morris53b24b82015-03-29 14:00:05 +01001153 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001154 /* We're using one, so create a matching key
1155 * on the newsk structure. If we fail to get
1156 * memory, then we end up not copying the key
1157 * across. Shucks.
1158 */
Eric Dumazetefe42082013-10-03 15:42:29 -07001159 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
Mel Gorman99a1dec2012-07-31 16:44:14 -07001160 AF_INET6, key->key, key->keylen,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001161 sk_gfp_mask(sk, GFP_ATOMIC));
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001162 }
1163#endif
1164
Balazs Scheidler093d2822010-10-21 13:06:43 +02001165 if (__inet_inherit_port(sk, newsk) < 0) {
Christoph Paasche337e242012-12-14 04:07:58 +00001166 inet_csk_prepare_forced_close(newsk);
1167 tcp_done(newsk);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001168 goto out;
1169 }
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001170 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001171 if (*own_req) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001172 tcp_move_syn(newtp, req);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001173
1174 /* Clone pktoptions received with SYN, if we own the req */
1175 if (ireq->pktopts) {
1176 newnp->pktoptions = skb_clone(ireq->pktopts,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001177 sk_gfp_mask(sk, GFP_ATOMIC));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001178 consume_skb(ireq->pktopts);
1179 ireq->pktopts = NULL;
1180 if (newnp->pktoptions)
1181 skb_set_owner_r(newnp->pktoptions, newsk);
1182 }
Eric Dumazetce105002015-10-30 09:46:12 -07001183 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184
1185 return newsk;
1186
1187out_overflow:
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001188 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001189out_nonewsk:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 dst_release(dst);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001191out:
Eric Dumazet9caad862016-04-01 08:52:20 -07001192 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193 return NULL;
1194}
1195
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196/* The socket must have it's spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001197 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198 *
1199 * We have a potential double-lock case here, so even when
1200 * doing backlog processing we use the BH locking scheme.
1201 * This is because we cannot sleep with the original spinlock
1202 * held.
1203 */
1204static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1205{
1206 struct ipv6_pinfo *np = inet6_sk(sk);
1207 struct tcp_sock *tp;
1208 struct sk_buff *opt_skb = NULL;
1209
1210 /* Imagine: socket is IPv6. IPv4 packet arrives,
1211 goes to IPv4 receive handler and backlogged.
1212 From backlog it always goes here. Kerboom...
1213 Fortunately, tcp_rcv_established and rcv_established
1214 handle them correctly, but it is not case with
1215 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1216 */
1217
1218 if (skb->protocol == htons(ETH_P_IP))
1219 return tcp_v4_do_rcv(sk, skb);
1220
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001221 if (sk_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 goto discard;
1223
1224 /*
1225 * socket locking is here for SMP purposes as backlog rcv
1226 * is currently called with bh processing disabled.
1227 */
1228
1229 /* Do Stevens' IPV6_PKTOPTIONS.
1230
1231 Yes, guys, it is the only place in our code, where we
1232 may make it not affecting IPv4.
1233 The rest of code is protocol independent,
1234 and I do not like idea to uglify IPv4.
1235
1236 Actually, all the idea behind IPV6_PKTOPTIONS
1237 looks not very well thought. For now we latch
1238 options, received in the last packet, enqueued
1239 by tcp. Feel free to propose better solution.
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001240 --ANK (980728)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 */
1242 if (np->rxopt.all)
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001243 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244
1245 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet5d299f32012-08-06 05:09:33 +00001246 struct dst_entry *dst = sk->sk_rx_dst;
1247
Tom Herbertbdeab992011-08-14 19:45:55 +00001248 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001249 sk_mark_napi_id(sk, skb);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001250 if (dst) {
1251 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1252 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1253 dst_release(dst);
1254 sk->sk_rx_dst = NULL;
1255 }
1256 }
1257
Vijay Subramanianc995ae22013-09-03 12:23:22 -07001258 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 if (opt_skb)
1260 goto ipv6_pktoptions;
1261 return 0;
1262 }
1263
Eric Dumazet12e25e12015-06-03 23:49:21 -07001264 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 goto csum_err;
1266
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001267 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001268 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1269
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 if (!nsk)
1271 goto discard;
1272
Weilong Chen4c99aa42013-12-19 18:44:34 +08001273 if (nsk != sk) {
Tom Herbertbdeab992011-08-14 19:45:55 +00001274 sock_rps_save_rxhash(nsk, skb);
Eric Dumazet38cb5242015-10-02 11:43:26 -07001275 sk_mark_napi_id(nsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276 if (tcp_child_process(sk, nsk, skb))
1277 goto reset;
1278 if (opt_skb)
1279 __kfree_skb(opt_skb);
1280 return 0;
1281 }
Neil Horman47482f12011-04-06 13:07:09 -07001282 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001283 sock_rps_save_rxhash(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001285 if (tcp_rcv_state_process(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 goto reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287 if (opt_skb)
1288 goto ipv6_pktoptions;
1289 return 0;
1290
1291reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001292 tcp_v6_send_reset(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293discard:
1294 if (opt_skb)
1295 __kfree_skb(opt_skb);
1296 kfree_skb(skb);
1297 return 0;
1298csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001299 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1300 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 goto discard;
1302
1303
1304ipv6_pktoptions:
1305 /* Do you ask, what is it?
1306
1307 1. skb was enqueued by tcp.
1308 2. skb is added to tail of read queue, rather than out of order.
1309 3. socket is not in passive state.
1310 4. Finally, it really contains options, which user wants to receive.
1311 */
1312 tp = tcp_sk(sk);
1313 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1314 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001315 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
Eric Dumazet870c3152014-10-17 09:17:20 -07001316 np->mcast_oif = tcp_v6_iif(opt_skb);
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001317 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001318 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
Florent Fourcot82e9f102013-12-08 15:46:59 +01001319 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
Florent Fourcot1397ed32013-12-08 15:46:57 +01001320 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001321 if (np->repflow)
1322 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
Eric Dumazeta2247722014-09-27 09:50:56 -07001323 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 skb_set_owner_r(opt_skb, sk);
1325 opt_skb = xchg(&np->pktoptions, opt_skb);
1326 } else {
1327 __kfree_skb(opt_skb);
1328 opt_skb = xchg(&np->pktoptions, NULL);
1329 }
1330 }
1331
Wei Yongjun800d55f2009-02-23 21:45:33 +00001332 kfree_skb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 return 0;
1334}
1335
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001336static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1337 const struct tcphdr *th)
1338{
1339 /* This is tricky: we move IP6CB at its correct location into
1340 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1341 * _decode_session6() uses IP6CB().
1342 * barrier() makes sure compiler won't play aliasing games.
1343 */
1344 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1345 sizeof(struct inet6_skb_parm));
1346 barrier();
1347
1348 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1349 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1350 skb->len - th->doff*4);
1351 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1352 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1353 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1354 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1355 TCP_SKB_CB(skb)->sacked = 0;
1356}
1357
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001358static void tcp_v6_restore_cb(struct sk_buff *skb)
1359{
1360 /* We need to move header back to the beginning if xfrm6_policy_check()
1361 * and tcp_v6_fill_cb() are going to be called again.
1362 */
1363 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1364 sizeof(struct inet6_skb_parm));
1365}
1366
Herbert Xue5bbef22007-10-15 12:50:28 -07001367static int tcp_v6_rcv(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001369 const struct tcphdr *th;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001370 const struct ipv6hdr *hdr;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001371 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 struct sock *sk;
1373 int ret;
Pavel Emelyanova86b1e32008-07-16 20:20:58 -07001374 struct net *net = dev_net(skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375
1376 if (skb->pkt_type != PACKET_HOST)
1377 goto discard_it;
1378
1379 /*
1380 * Count it even if it's bad.
1381 */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001382 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383
1384 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1385 goto discard_it;
1386
Eric Dumazetea1627c2016-05-13 09:16:40 -07001387 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
Eric Dumazetea1627c2016-05-13 09:16:40 -07001389 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 goto bad_packet;
1391 if (!pskb_may_pull(skb, th->doff*4))
1392 goto discard_it;
1393
Tom Herberte4f45b72014-05-02 16:29:51 -07001394 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001395 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396
Eric Dumazetea1627c2016-05-13 09:16:40 -07001397 th = (const struct tcphdr *)skb->data;
Stephen Hemmingere802af92010-04-22 15:24:53 -07001398 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001400lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001401 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
Eric Dumazet3b24d852016-04-01 08:52:17 -07001402 th->source, th->dest, inet6_iif(skb),
1403 &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 if (!sk)
1405 goto no_tcp_socket;
1406
1407process:
1408 if (sk->sk_state == TCP_TIME_WAIT)
1409 goto do_time_wait;
1410
Eric Dumazet079096f2015-10-02 11:43:32 -07001411 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1412 struct request_sock *req = inet_reqsk(sk);
Eric Dumazet77166822016-02-18 05:39:18 -08001413 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001414
1415 sk = req->rsk_listener;
1416 tcp_v6_fill_cb(skb, hdr, th);
1417 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1418 reqsk_put(req);
1419 goto discard_it;
1420 }
Eric Dumazet77166822016-02-18 05:39:18 -08001421 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001422 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001423 goto lookup;
1424 }
Eric Dumazet77166822016-02-18 05:39:18 -08001425 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001426 refcounted = true;
Eric Dumazet77166822016-02-18 05:39:18 -08001427 nsk = tcp_check_req(sk, skb, req, false);
Eric Dumazet079096f2015-10-02 11:43:32 -07001428 if (!nsk) {
1429 reqsk_put(req);
Eric Dumazet77166822016-02-18 05:39:18 -08001430 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001431 }
1432 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001433 reqsk_put(req);
1434 tcp_v6_restore_cb(skb);
1435 } else if (tcp_child_process(sk, nsk, skb)) {
1436 tcp_v6_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001437 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001438 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001439 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001440 return 0;
1441 }
1442 }
Stephen Hemmingere802af92010-04-22 15:24:53 -07001443 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001444 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingere802af92010-04-22 15:24:53 -07001445 goto discard_and_relse;
1446 }
1447
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1449 goto discard_and_relse;
1450
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001451 tcp_v6_fill_cb(skb, hdr, th);
1452
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001453 if (tcp_v6_inbound_md5_hash(sk, skb))
1454 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001455
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001456 if (sk_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 goto discard_and_relse;
1458
1459 skb->dev = NULL;
1460
Eric Dumazete994b2f2015-10-02 11:43:39 -07001461 if (sk->sk_state == TCP_LISTEN) {
1462 ret = tcp_v6_do_rcv(sk, skb);
1463 goto put_and_return;
1464 }
1465
1466 sk_incoming_cpu_update(sk);
1467
Fabio Olive Leite293b9c42006-09-25 22:28:47 -07001468 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001469 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 ret = 0;
1471 if (!sock_owned_by_user(sk)) {
Dan Williams7bced392013-12-30 12:37:29 -08001472 if (!tcp_prequeue(sk, skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001473 ret = tcp_v6_do_rcv(sk, skb);
Eric Dumazetda882c12012-04-22 23:38:54 +00001474 } else if (unlikely(sk_add_backlog(sk, skb,
1475 sk->sk_rcvbuf + sk->sk_sndbuf))) {
Zhu Yi6b03a532010-03-04 18:01:41 +00001476 bh_unlock_sock(sk);
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001477 __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
Zhu Yi6b03a532010-03-04 18:01:41 +00001478 goto discard_and_relse;
1479 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 bh_unlock_sock(sk);
1481
Eric Dumazete994b2f2015-10-02 11:43:39 -07001482put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001483 if (refcounted)
1484 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 return ret ? -1 : 0;
1486
1487no_tcp_socket:
1488 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1489 goto discard_it;
1490
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001491 tcp_v6_fill_cb(skb, hdr, th);
1492
Eric Dumazet12e25e12015-06-03 23:49:21 -07001493 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001494csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001495 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001497 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001499 tcp_v6_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 }
1501
1502discard_it:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 kfree_skb(skb);
1504 return 0;
1505
1506discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001507 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001508 if (refcounted)
1509 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 goto discard_it;
1511
1512do_time_wait:
1513 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001514 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 goto discard_it;
1516 }
1517
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001518 tcp_v6_fill_cb(skb, hdr, th);
1519
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001520 if (tcp_checksum_complete(skb)) {
1521 inet_twsk_put(inet_twsk(sk));
1522 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 }
1524
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001525 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 case TCP_TW_SYN:
1527 {
1528 struct sock *sk2;
1529
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001530 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
Craig Galleka5836362016-02-10 11:50:38 -05001531 skb, __tcp_hdrlen(th),
Tom Herbert5ba24952013-01-22 09:50:39 +00001532 &ipv6_hdr(skb)->saddr, th->source,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001533 &ipv6_hdr(skb)->daddr,
Eric Dumazet870c3152014-10-17 09:17:20 -07001534 ntohs(th->dest), tcp_v6_iif(skb));
Ian Morris53b24b82015-03-29 14:00:05 +01001535 if (sk2) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07001536 struct inet_timewait_sock *tw = inet_twsk(sk);
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001537 inet_twsk_deschedule_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 sk = sk2;
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001539 tcp_v6_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001540 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 goto process;
1542 }
1543 /* Fall through to ACK */
1544 }
1545 case TCP_TW_ACK:
1546 tcp_v6_timewait_ack(sk, skb);
1547 break;
1548 case TCP_TW_RST:
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001549 tcp_v6_restore_cb(skb);
Florian Westphal271c3b92015-12-21 21:29:26 +01001550 tcp_v6_send_reset(sk, skb);
1551 inet_twsk_deschedule_put(inet_twsk(sk));
1552 goto discard_it;
Wang Yufen4aa956d2014-03-29 09:27:29 +08001553 case TCP_TW_SUCCESS:
1554 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 }
1556 goto discard_it;
1557}
1558
Eric Dumazetc7109982012-07-26 12:18:11 +00001559static void tcp_v6_early_demux(struct sk_buff *skb)
1560{
1561 const struct ipv6hdr *hdr;
1562 const struct tcphdr *th;
1563 struct sock *sk;
1564
1565 if (skb->pkt_type != PACKET_HOST)
1566 return;
1567
1568 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1569 return;
1570
1571 hdr = ipv6_hdr(skb);
1572 th = tcp_hdr(skb);
1573
1574 if (th->doff < sizeof(struct tcphdr) / 4)
1575 return;
1576
Eric Dumazet870c3152014-10-17 09:17:20 -07001577 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
Eric Dumazetc7109982012-07-26 12:18:11 +00001578 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1579 &hdr->saddr, th->source,
1580 &hdr->daddr, ntohs(th->dest),
1581 inet6_iif(skb));
1582 if (sk) {
1583 skb->sk = sk;
1584 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001585 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001586 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Neal Cardwellf3f12132012-10-22 21:41:48 +00001587
Eric Dumazetc7109982012-07-26 12:18:11 +00001588 if (dst)
Eric Dumazet5d299f32012-08-06 05:09:33 +00001589 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
Eric Dumazetc7109982012-07-26 12:18:11 +00001590 if (dst &&
Neal Cardwellf3f12132012-10-22 21:41:48 +00001591 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
Eric Dumazetc7109982012-07-26 12:18:11 +00001592 skb_dst_set_noref(skb, dst);
1593 }
1594 }
1595}
1596
David S. Millerccb7c412010-12-01 18:09:13 -08001597static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1598 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1599 .twsk_unique = tcp_twsk_unique,
Wang Yufen4aa956d2014-03-29 09:27:29 +08001600 .twsk_destructor = tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001601};
1602
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001603static const struct inet_connection_sock_af_ops ipv6_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001604 .queue_xmit = inet6_csk_xmit,
1605 .send_check = tcp_v6_send_check,
1606 .rebuild_header = inet6_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001607 .sk_rx_dst_set = inet6_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001608 .conn_request = tcp_v6_conn_request,
1609 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001610 .net_header_len = sizeof(struct ipv6hdr),
Eric Dumazet67469602012-04-24 07:37:38 +00001611 .net_frag_header_len = sizeof(struct frag_hdr),
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001612 .setsockopt = ipv6_setsockopt,
1613 .getsockopt = ipv6_getsockopt,
1614 .addr2sockaddr = inet6_csk_addr2sockaddr,
1615 .sockaddr_len = sizeof(struct sockaddr_in6),
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001616 .bind_conflict = inet6_csk_bind_conflict,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001617#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001618 .compat_setsockopt = compat_ipv6_setsockopt,
1619 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001620#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001621 .mtu_reduced = tcp_v6_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622};
1623
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001624#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001625static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001626 .md5_lookup = tcp_v6_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001627 .calc_md5_hash = tcp_v6_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001628 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001629};
David S. Millera9286302006-11-14 19:53:22 -08001630#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001631
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632/*
1633 * TCP over IPv4 via INET6 API
1634 */
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001635static const struct inet_connection_sock_af_ops ipv6_mapped = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001636 .queue_xmit = ip_queue_xmit,
1637 .send_check = tcp_v4_send_check,
1638 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet63d02d12012-08-09 14:11:00 +00001639 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001640 .conn_request = tcp_v6_conn_request,
1641 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001642 .net_header_len = sizeof(struct iphdr),
1643 .setsockopt = ipv6_setsockopt,
1644 .getsockopt = ipv6_getsockopt,
1645 .addr2sockaddr = inet6_csk_addr2sockaddr,
1646 .sockaddr_len = sizeof(struct sockaddr_in6),
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001647 .bind_conflict = inet6_csk_bind_conflict,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001648#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001649 .compat_setsockopt = compat_ipv6_setsockopt,
1650 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001651#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001652 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653};
1654
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001655#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001656static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001657 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001658 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001659 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001660};
David S. Millera9286302006-11-14 19:53:22 -08001661#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001662
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663/* NOTE: A lot of things set to zero explicitly by call to
1664 * sk_alloc() so need not be done here.
1665 */
1666static int tcp_v6_init_sock(struct sock *sk)
1667{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001668 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669
Neal Cardwell900f65d2012-04-19 09:55:21 +00001670 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001672 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001674#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001675 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001676#endif
1677
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 return 0;
1679}
1680
Brian Haley7d06b2e2008-06-14 17:04:49 -07001681static void tcp_v6_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 tcp_v4_destroy_sock(sk);
Brian Haley7d06b2e2008-06-14 17:04:49 -07001684 inet6_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685}
1686
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001687#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688/* Proc filesystem TCPv6 sock list dumping. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001689static void get_openreq6(struct seq_file *seq,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001690 const struct request_sock *req, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691{
Eric Dumazetfa76ce732015-03-19 19:04:20 -07001692 long ttd = req->rsk_timer.expires - jiffies;
Eric Dumazet634fb9792013-10-09 15:21:29 -07001693 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1694 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
1696 if (ttd < 0)
1697 ttd = 0;
1698
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 seq_printf(seq,
1700 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001701 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 i,
1703 src->s6_addr32[0], src->s6_addr32[1],
1704 src->s6_addr32[2], src->s6_addr32[3],
Eric Dumazetb44084c2013-10-10 00:04:37 -07001705 inet_rsk(req)->ir_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 dest->s6_addr32[0], dest->s6_addr32[1],
1707 dest->s6_addr32[2], dest->s6_addr32[3],
Eric Dumazet634fb9792013-10-09 15:21:29 -07001708 ntohs(inet_rsk(req)->ir_rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 TCP_SYN_RECV,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001710 0, 0, /* could print option size, but that is af dependent. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001711 1, /* timers active (only the expire timer) */
1712 jiffies_to_clock_t(ttd),
Eric Dumazete6c022a2012-10-27 23:16:46 +00001713 req->num_timeout,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001714 from_kuid_munged(seq_user_ns(seq),
1715 sock_i_uid(req->rsk_listener)),
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001716 0, /* non standard timer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 0, /* open_requests have no inode */
1718 0, req);
1719}
1720
1721static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1722{
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001723 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 __u16 destp, srcp;
1725 int timer_active;
1726 unsigned long timer_expires;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001727 const struct inet_sock *inet = inet_sk(sp);
1728 const struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001729 const struct inet_connection_sock *icsk = inet_csk(sp);
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001730 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001731 int rx_queue;
1732 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733
Eric Dumazetefe42082013-10-03 15:42:29 -07001734 dest = &sp->sk_v6_daddr;
1735 src = &sp->sk_v6_rcv_saddr;
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001736 destp = ntohs(inet->inet_dport);
1737 srcp = ntohs(inet->inet_sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001738
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001739 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1740 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
1741 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001743 timer_expires = icsk->icsk_timeout;
1744 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001746 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 } else if (timer_pending(&sp->sk_timer)) {
1748 timer_active = 2;
1749 timer_expires = sp->sk_timer.expires;
1750 } else {
1751 timer_active = 0;
1752 timer_expires = jiffies;
1753 }
1754
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001755 state = sk_state_load(sp);
1756 if (state == TCP_LISTEN)
1757 rx_queue = sp->sk_ack_backlog;
1758 else
1759 /* Because we don't lock the socket,
1760 * we might find a transient negative value.
1761 */
1762 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1763
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764 seq_printf(seq,
1765 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001766 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 i,
1768 src->s6_addr32[0], src->s6_addr32[1],
1769 src->s6_addr32[2], src->s6_addr32[3], srcp,
1770 dest->s6_addr32[0], dest->s6_addr32[1],
1771 dest->s6_addr32[2], dest->s6_addr32[3], destp,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001772 state,
1773 tp->write_seq - tp->snd_una,
1774 rx_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 timer_active,
Eric Dumazeta399a802012-08-08 21:13:53 +00001776 jiffies_delta_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001777 icsk->icsk_retransmits,
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06001778 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001779 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 sock_i_ino(sp),
1781 atomic_read(&sp->sk_refcnt), sp,
Stephen Hemminger7be87352008-06-27 20:00:19 -07001782 jiffies_to_clock_t(icsk->icsk_rto),
1783 jiffies_to_clock_t(icsk->icsk_ack.ato),
Weilong Chen4c99aa42013-12-19 18:44:34 +08001784 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07001785 tp->snd_cwnd,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001786 state == TCP_LISTEN ?
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001787 fastopenq->max_qlen :
Yuchung Cheng0a672f72014-05-11 20:22:12 -07001788 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 );
1790}
1791
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001792static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001793 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794{
Eric Dumazet789f5582015-04-12 18:51:09 -07001795 long delta = tw->tw_timer.expires - jiffies;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001796 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 __u16 destp, srcp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798
Eric Dumazetefe42082013-10-03 15:42:29 -07001799 dest = &tw->tw_v6_daddr;
1800 src = &tw->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 destp = ntohs(tw->tw_dport);
1802 srcp = ntohs(tw->tw_sport);
1803
1804 seq_printf(seq,
1805 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Dan Rosenberg71338aa2011-05-23 12:17:35 +00001806 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 i,
1808 src->s6_addr32[0], src->s6_addr32[1],
1809 src->s6_addr32[2], src->s6_addr32[3], srcp,
1810 dest->s6_addr32[0], dest->s6_addr32[1],
1811 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1812 tw->tw_substate, 0, 0,
Eric Dumazeta399a802012-08-08 21:13:53 +00001813 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 atomic_read(&tw->tw_refcnt), tw);
1815}
1816
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817static int tcp6_seq_show(struct seq_file *seq, void *v)
1818{
1819 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07001820 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821
1822 if (v == SEQ_START_TOKEN) {
1823 seq_puts(seq,
1824 " sl "
1825 "local_address "
1826 "remote_address "
1827 "st tx_queue rx_queue tr tm->when retrnsmt"
1828 " uid timeout inode\n");
1829 goto out;
1830 }
1831 st = seq->private;
1832
Eric Dumazet079096f2015-10-02 11:43:32 -07001833 if (sk->sk_state == TCP_TIME_WAIT)
1834 get_timewait6_sock(seq, v, st->num);
1835 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001836 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07001837 else
1838 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839out:
1840 return 0;
1841}
1842
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001843static const struct file_operations tcp6_afinfo_seq_fops = {
1844 .owner = THIS_MODULE,
1845 .open = tcp_seq_open,
1846 .read = seq_read,
1847 .llseek = seq_lseek,
1848 .release = seq_release_net
1849};
1850
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 .name = "tcp6",
1853 .family = AF_INET6,
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001854 .seq_fops = &tcp6_afinfo_seq_fops,
Denis V. Lunev9427c4b2008-04-13 22:12:13 -07001855 .seq_ops = {
1856 .show = tcp6_seq_show,
1857 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858};
1859
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001860int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001862 return tcp_proc_register(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863}
1864
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001865void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001867 tcp_proc_unregister(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868}
1869#endif
1870
Eric Dumazetf77d6022013-05-09 10:28:16 +00001871static void tcp_v6_clear_sk(struct sock *sk, int size)
1872{
1873 struct inet_sock *inet = inet_sk(sk);
1874
1875 /* we do not want to clear pinet6 field, because of RCU lookups */
1876 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1877
1878 size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1879 memset(&inet->pinet6 + 1, 0, size);
1880}
1881
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882struct proto tcpv6_prot = {
1883 .name = "TCPv6",
1884 .owner = THIS_MODULE,
1885 .close = tcp_close,
1886 .connect = tcp_v6_connect,
1887 .disconnect = tcp_disconnect,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001888 .accept = inet_csk_accept,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 .ioctl = tcp_ioctl,
1890 .init = tcp_v6_init_sock,
1891 .destroy = tcp_v6_destroy_sock,
1892 .shutdown = tcp_shutdown,
1893 .setsockopt = tcp_setsockopt,
1894 .getsockopt = tcp_getsockopt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 .recvmsg = tcp_recvmsg,
Changli Gao7ba42912010-07-10 20:41:55 +00001896 .sendmsg = tcp_sendmsg,
1897 .sendpage = tcp_sendpage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 .backlog_rcv = tcp_v6_do_rcv,
Eric Dumazet46d3cea2012-07-11 05:50:31 +00001899 .release_cb = tcp_release_cb,
Craig Gallek496611d2016-02-10 11:50:36 -05001900 .hash = inet6_hash,
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001901 .unhash = inet_unhash,
1902 .get_port = inet_csk_get_port,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 .enter_memory_pressure = tcp_enter_memory_pressure,
Eric Dumazetc9bee3b72013-07-22 20:27:07 -07001904 .stream_memory_free = tcp_stream_memory_free,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 .sockets_allocated = &tcp_sockets_allocated,
1906 .memory_allocated = &tcp_memory_allocated,
1907 .memory_pressure = &tcp_memory_pressure,
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001908 .orphan_count = &tcp_orphan_count,
Eric W. Biedermana4fe34b2013-10-19 16:25:36 -07001909 .sysctl_mem = sysctl_tcp_mem,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910 .sysctl_wmem = sysctl_tcp_wmem,
1911 .sysctl_rmem = sysctl_tcp_rmem,
1912 .max_header = MAX_TCP_HEADER,
1913 .obj_size = sizeof(struct tcp6_sock),
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08001914 .slab_flags = SLAB_DESTROY_BY_RCU,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08001915 .twsk_prot = &tcp6_timewait_sock_ops,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001916 .rsk_prot = &tcp6_request_sock_ops,
Pavel Emelyanov39d8cda2008-03-22 16:50:58 -07001917 .h.hashinfo = &tcp_hashinfo,
Changli Gao7ba42912010-07-10 20:41:55 +00001918 .no_autobind = true,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001919#ifdef CONFIG_COMPAT
1920 .compat_setsockopt = compat_tcp_setsockopt,
1921 .compat_getsockopt = compat_tcp_getsockopt,
1922#endif
Eric Dumazetf77d6022013-05-09 10:28:16 +00001923 .clear_sk = tcp_v6_clear_sk,
Lorenzo Colittic1e64e22015-12-16 12:30:05 +09001924 .diag_destroy = tcp_abort,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925};
1926
Alexey Dobriyan41135cc2009-09-14 12:22:28 +00001927static const struct inet6_protocol tcpv6_protocol = {
Eric Dumazetc7109982012-07-26 12:18:11 +00001928 .early_demux = tcp_v6_early_demux,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 .handler = tcp_v6_rcv,
1930 .err_handler = tcp_v6_err,
1931 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1932};
1933
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934static struct inet_protosw tcpv6_protosw = {
1935 .type = SOCK_STREAM,
1936 .protocol = IPPROTO_TCP,
1937 .prot = &tcpv6_prot,
1938 .ops = &inet6_stream_ops,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001939 .flags = INET_PROTOSW_PERMANENT |
1940 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941};
1942
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001943static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001944{
Denis V. Lunev56772422008-04-03 14:28:30 -07001945 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1946 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001947}
1948
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001949static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001950{
Denis V. Lunev56772422008-04-03 14:28:30 -07001951 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001952}
1953
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001954static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001955{
1956 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001957}
1958
1959static struct pernet_operations tcpv6_net_ops = {
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001960 .init = tcpv6_net_init,
1961 .exit = tcpv6_net_exit,
1962 .exit_batch = tcpv6_net_exit_batch,
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001963};
1964
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001965int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001967 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08001968
Vlad Yasevich33362882012-11-15 08:49:15 +00001969 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1970 if (ret)
Vlad Yasevichc6b641a2012-11-15 08:49:22 +00001971 goto out;
Vlad Yasevich33362882012-11-15 08:49:15 +00001972
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001973 /* register inet6 protocol */
1974 ret = inet6_register_protosw(&tcpv6_protosw);
1975 if (ret)
1976 goto out_tcpv6_protocol;
1977
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001978 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001979 if (ret)
1980 goto out_tcpv6_protosw;
1981out:
1982 return ret;
1983
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001984out_tcpv6_protosw:
1985 inet6_unregister_protosw(&tcpv6_protosw);
Vlad Yasevich33362882012-11-15 08:49:15 +00001986out_tcpv6_protocol:
1987 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001988 goto out;
1989}
1990
Daniel Lezcano09f77092007-12-13 05:34:58 -08001991void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001992{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001993 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001994 inet6_unregister_protosw(&tcpv6_protosw);
1995 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996}