/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source, tsoff);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport,
							     &tp->tsoffset);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};

static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even when it is for a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We are not losing security here:
		 * the incoming packet is checked against the md5 hash of the
		 * key we find, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}


Eric Dumazet079096f2015-10-02 11:43:32 -0700967static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968{
Glenn Griffinc6aefaf2008-02-07 21:49:26 -0800969#ifdef CONFIG_SYN_COOKIES
Eric Dumazet079096f2015-10-02 11:43:32 -0700970 const struct tcphdr *th = tcp_hdr(skb);
971
Florian Westphalaf9b4732010-06-03 00:43:44 +0000972 if (!th->syn)
Glenn Griffinc6aefaf2008-02-07 21:49:26 -0800973 sk = cookie_v6_check(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974#endif
975 return sk;
976}
977
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
979{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980 if (skb->protocol == htons(ETH_P_IP))
981 return tcp_v4_conn_request(sk, skb);
982
983 if (!ipv6_unicast_destination(skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900984 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985
Octavian Purdila1fb6f152014-06-25 17:10:02 +0300986 return tcp_conn_request(&tcp6_request_sock_ops,
987 &tcp_request_sock_ipv6_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988
Linus Torvalds1da177e2005-04-16 15:20:36 -0700989drop:
Eric Dumazet9caad862016-04-01 08:52:20 -0700990 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991 return 0; /* don't send reset */
992}
993
Eric Dumazetebf6c9c2017-02-05 20:23:22 -0800994static void tcp_v6_restore_cb(struct sk_buff *skb)
995{
996 /* We need to move header back to the beginning if xfrm6_policy_check()
997 * and tcp_v6_fill_cb() are going to be called again.
998 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
999 */
1000 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1001 sizeof(struct inet6_skb_parm));
1002}
1003
Eric Dumazet0c271712015-09-29 07:42:48 -07001004static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001005 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001006 struct dst_entry *dst,
1007 struct request_sock *req_unhash,
1008 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009{
Eric Dumazet634fb9792013-10-09 15:21:29 -07001010 struct inet_request_sock *ireq;
Eric Dumazet0c271712015-09-29 07:42:48 -07001011 struct ipv6_pinfo *newnp;
1012 const struct ipv6_pinfo *np = inet6_sk(sk);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001013 struct ipv6_txoptions *opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014 struct tcp6_sock *newtcp6sk;
1015 struct inet_sock *newinet;
1016 struct tcp_sock *newtp;
1017 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001018#ifdef CONFIG_TCP_MD5SIG
1019 struct tcp_md5sig_key *key;
1020#endif
Neal Cardwell3840a062012-06-28 12:34:19 +00001021 struct flowi6 fl6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001022
1023 if (skb->protocol == htons(ETH_P_IP)) {
1024 /*
1025 * v6 mapped
1026 */
1027
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001028 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1029 req_unhash, own_req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001030
Ian Morris63159f22015-03-29 14:00:04 +01001031 if (!newsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032 return NULL;
1033
1034 newtcp6sk = (struct tcp6_sock *)newsk;
1035 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1036
1037 newinet = inet_sk(newsk);
1038 newnp = inet6_sk(newsk);
1039 newtp = tcp_sk(newsk);
1040
1041 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1042
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001043 newnp->saddr = newsk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001045 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001047#ifdef CONFIG_TCP_MD5SIG
1048 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1049#endif
1050
Yan, Zheng676a1182011-09-25 02:21:30 +00001051 newnp->ipv6_ac_list = NULL;
1052 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001053 newnp->pktoptions = NULL;
1054 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001055 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001056 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001057 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001058 if (np->repflow)
1059 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001060
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001061 /*
1062 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1063 * here, tcp_create_openreq_child now does this for us, see the comment in
1064 * that function for the gory details. -acme
Linus Torvalds1da177e2005-04-16 15:20:36 -07001065 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001066
1067 /* It is tricky place. Until this moment IPv4 tcp
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001068 worked with IPv6 icsk.icsk_af_ops.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069 Sync it now.
1070 */
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001071 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072
1073 return newsk;
1074 }
1075
Eric Dumazet634fb9792013-10-09 15:21:29 -07001076 ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077
1078 if (sk_acceptq_is_full(sk))
1079 goto out_overflow;
1080
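	/* Route the child: reuse the dst cached from request handling if
	 * we were given one, otherwise look it up again here.
	 */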
David S. Miller493f3772010-12-02 12:14:29 -08001081 if (!dst) {
Eric Dumazetf76b33c2015-09-29 07:42:42 -07001082 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
David S. Miller493f3772010-12-02 12:14:29 -08001083 if (!dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084 goto out;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001085 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001086
1087 newsk = tcp_create_openreq_child(sk, req, skb);
Ian Morris63159f22015-03-29 14:00:04 +01001088 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001089 goto out_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001090
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001091 /*
1092 * No need to charge this sock to the relevant IPv6 refcnt debug socks
 1093 * count here; tcp_create_openreq_child now does this for us, see the
1094 * comment in that function for the gory details. -acme
1095 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001096
Stephen Hemminger59eed272006-08-25 15:55:43 -07001097 newsk->sk_gso_type = SKB_GSO_TCPV6;
Eric Dumazet6bd4f352015-12-02 21:53:57 -08001098 ip6_dst_store(newsk, dst, NULL, NULL);
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001099 inet6_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001100
1101 newtcp6sk = (struct tcp6_sock *)newsk;
1102 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1103
1104 newtp = tcp_sk(newsk);
1105 newinet = inet_sk(newsk);
1106 newnp = inet6_sk(newsk);
1107
1108 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1109
Eric Dumazet634fb9792013-10-09 15:21:29 -07001110 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1111 newnp->saddr = ireq->ir_v6_loc_addr;
1112 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1113 newsk->sk_bound_dev_if = ireq->ir_iif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001115 /* Now IPv6 options...
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116
1117 First: no IPv4 options.
1118 */
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001119 newinet->inet_opt = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001120 newnp->ipv6_ac_list = NULL;
Masayuki Nakagawad35690b2007-03-16 16:14:03 -07001121 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122
1123 /* Clone RX bits */
1124 newnp->rxopt.all = np->rxopt.all;
1125
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126 newnp->pktoptions = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001128 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001129 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001130 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001131 if (np->repflow)
1132 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133
 1134 /* Clone native IPv6 options from the listening socket (if any).
 1135
 1136 Yes, keeping a reference count would be much more clever,
 1137 but we do one more thing here: reattach optmem
 1138 to newsk.
 1139 */
Huw Davies56ac42b2016-06-27 15:05:28 -04001140 opt = ireq->ipv6_opt;
1141 if (!opt)
1142 opt = rcu_dereference(np->opt);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001143 if (opt) {
1144 opt = ipv6_dup_options(newsk, opt);
1145 RCU_INIT_POINTER(newnp->opt, opt);
1146 }
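	/* Account for the extension headers these options will add;
	 * tcp_sync_mss() below subtracts icsk_ext_hdr_len when sizing
	 * segments.
	 */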
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001147 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001148 if (opt)
1149 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1150 opt->opt_flen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151
Daniel Borkmann81164412015-01-05 23:57:48 +01001152 tcp_ca_openreq_child(newsk, dst);
1153
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 tcp_sync_mss(newsk, dst_mtu(dst));
David S. Miller0dbaee32010-12-13 12:52:14 -08001155 newtp->advmss = dst_metric_advmss(dst);
Neal Cardwelld135c522012-04-22 09:45:47 +00001156 if (tcp_sk(sk)->rx_opt.user_mss &&
1157 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1158 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1159
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 tcp_initialize_rcv_mss(newsk);
1161
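	/* This is a pure IPv6 socket; fill the IPv4 address fields with
	 * the LOOPBACK4_IPV6 placeholder instead of leaving them zero.
	 */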
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001162 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1163 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001165#ifdef CONFIG_TCP_MD5SIG
1166 /* Copy over the MD5 key from the original socket */
Wang Yufen4aa956d2014-03-29 09:27:29 +08001167 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
Ian Morris53b24b82015-03-29 14:00:05 +01001168 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001169 /* We're using one, so create a matching key
1170 * on the newsk structure. If we fail to get
1171 * memory, then we end up not copying the key
1172 * across. Shucks.
1173 */
Eric Dumazetefe42082013-10-03 15:42:29 -07001174 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
Mel Gorman99a1dec2012-07-31 16:44:14 -07001175 AF_INET6, key->key, key->keylen,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001176 sk_gfp_mask(sk, GFP_ATOMIC));
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001177 }
1178#endif
1179
Balazs Scheidler093d2822010-10-21 13:06:43 +02001180 if (__inet_inherit_port(sk, newsk) < 0) {
Christoph Paasche337e242012-12-14 04:07:58 +00001181 inet_csk_prepare_forced_close(newsk);
1182 tcp_done(newsk);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001183 goto out;
1184 }
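	/* inet_ehash_nolisten() may lose the race to insert a child for
	 * this request; *own_req tells the caller whether newsk is the
	 * socket that actually got hashed.
	 */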
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001185 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001186 if (*own_req) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001187 tcp_move_syn(newtp, req);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001188
1189 /* Clone pktoptions received with SYN, if we own the req */
1190 if (ireq->pktopts) {
1191 newnp->pktoptions = skb_clone(ireq->pktopts,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001192 sk_gfp_mask(sk, GFP_ATOMIC));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001193 consume_skb(ireq->pktopts);
1194 ireq->pktopts = NULL;
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001195 if (newnp->pktoptions) {
1196 tcp_v6_restore_cb(newnp->pktoptions);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001197 skb_set_owner_r(newnp->pktoptions, newsk);
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001198 }
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001199 }
Eric Dumazetce105002015-10-30 09:46:12 -07001200 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201
1202 return newsk;
1203
1204out_overflow:
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001205 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001206out_nonewsk:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 dst_release(dst);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001208out:
Eric Dumazet9caad862016-04-01 08:52:20 -07001209 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 return NULL;
1211}
1212
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213/* The socket must have its spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001214 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 *
1216 * We have a potential double-lock case here, so even when
1217 * doing backlog processing we use the BH locking scheme.
1218 * This is because we cannot sleep with the original spinlock
1219 * held.
1220 */
1221static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1222{
1223 struct ipv6_pinfo *np = inet6_sk(sk);
1224 struct tcp_sock *tp;
1225 struct sk_buff *opt_skb = NULL;
1226
 1227 /* Imagine: the socket is IPv6. An IPv4 packet arrives,
 1228 goes to the IPv4 receive handler and is backlogged.
 1229 From the backlog it always goes here. Kerboom...
 1230 Fortunately, tcp_rcv_established and rcv_established
 1231 handle them correctly, but it is not the case with
 1232 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
 1233 */
1234
1235 if (skb->protocol == htons(ETH_P_IP))
1236 return tcp_v4_do_rcv(sk, skb);
1237
Eric Dumazetac6e7802016-11-10 13:12:35 -08001238 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 goto discard;
1240
1241 /*
1242 * socket locking is here for SMP purposes as backlog rcv
1243 * is currently called with bh processing disabled.
1244 */
1245
 1246 /* Do Stevens' IPV6_PKTOPTIONS.
 1247
 1248 Yes, guys, this is the only place in our code where we
 1249 can do it without affecting IPv4.
 1250 The rest of the code is protocol independent,
 1251 and I do not like the idea of uglifying IPv4.
 1252
 1253 Actually, the whole idea behind IPV6_PKTOPTIONS
 1254 does not look very well thought out. For now we latch
 1255 the options received in the last packet enqueued
 1256 by tcp. Feel free to propose a better solution.
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001257 --ANK (980728)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 */
1259 if (np->rxopt.all)
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001260 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261
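	/* opt_skb is our own clone, so the options can still be latched
	 * at ipv6_pktoptions below after the receive path consumes skb.
	 */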
1262 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet5d299f32012-08-06 05:09:33 +00001263 struct dst_entry *dst = sk->sk_rx_dst;
1264
Tom Herbertbdeab992011-08-14 19:45:55 +00001265 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001266 sk_mark_napi_id(sk, skb);
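		/* Validate the cached RX route: drop it if the packet came in
		 * on a different interface or the dst cookie went stale.
		 */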
Eric Dumazet5d299f32012-08-06 05:09:33 +00001267 if (dst) {
1268 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1269 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1270 dst_release(dst);
1271 sk->sk_rx_dst = NULL;
1272 }
1273 }
1274
Vijay Subramanianc995ae22013-09-03 12:23:22 -07001275 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276 if (opt_skb)
1277 goto ipv6_pktoptions;
1278 return 0;
1279 }
1280
Eric Dumazet12e25e12015-06-03 23:49:21 -07001281 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 goto csum_err;
1283
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001284 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001285 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1286
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287 if (!nsk)
1288 goto discard;
1289
Weilong Chen4c99aa42013-12-19 18:44:34 +08001290 if (nsk != sk) {
Tom Herbertbdeab992011-08-14 19:45:55 +00001291 sock_rps_save_rxhash(nsk, skb);
Eric Dumazet38cb5242015-10-02 11:43:26 -07001292 sk_mark_napi_id(nsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 if (tcp_child_process(sk, nsk, skb))
1294 goto reset;
1295 if (opt_skb)
1296 __kfree_skb(opt_skb);
1297 return 0;
1298 }
Neil Horman47482f12011-04-06 13:07:09 -07001299 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001300 sock_rps_save_rxhash(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001302 if (tcp_rcv_state_process(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 goto reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 if (opt_skb)
1305 goto ipv6_pktoptions;
1306 return 0;
1307
1308reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001309 tcp_v6_send_reset(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310discard:
1311 if (opt_skb)
1312 __kfree_skb(opt_skb);
1313 kfree_skb(skb);
1314 return 0;
1315csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001316 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1317 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 goto discard;
1319
1320
1321ipv6_pktoptions:
 1322 /* What is this, you ask? We get here when:
 1323
 1324 1. the skb was enqueued by tcp,
 1325 2. the skb was added to the tail of the read queue, rather than out of order,
 1326 3. the socket is not in a passive state, and
 1327 4. finally, it really contains options which the user wants to receive.
 1328 */
1329 tp = tcp_sk(sk);
1330 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1331 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001332 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
Eric Dumazet870c3152014-10-17 09:17:20 -07001333 np->mcast_oif = tcp_v6_iif(opt_skb);
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001334 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001335 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
Florent Fourcot82e9f102013-12-08 15:46:59 +01001336 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
Florent Fourcot1397ed32013-12-08 15:46:57 +01001337 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001338 if (np->repflow)
1339 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
Eric Dumazeta2247722014-09-27 09:50:56 -07001340 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 skb_set_owner_r(opt_skb, sk);
Eric Dumazet8ce48622016-10-12 19:01:45 +02001342 tcp_v6_restore_cb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 opt_skb = xchg(&np->pktoptions, opt_skb);
1344 } else {
1345 __kfree_skb(opt_skb);
1346 opt_skb = xchg(&np->pktoptions, NULL);
1347 }
1348 }
1349
Wei Yongjun800d55f2009-02-23 21:45:33 +00001350 kfree_skb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 return 0;
1352}
1353
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001354static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1355 const struct tcphdr *th)
1356{
 1357 /* This is tricky: we move IP6CB to its correct location inside
 1358 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
 1359 * _decode_session6() uses IP6CB().
 1360 * barrier() makes sure the compiler won't play aliasing games.
 1361 */
1362 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1363 sizeof(struct inet6_skb_parm));
1364 barrier();
1365
1366 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1367 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1368 skb->len - th->doff*4);
1369 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1370 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1371 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1372 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1373 TCP_SKB_CB(skb)->sacked = 0;
1374}
1375
Herbert Xue5bbef22007-10-15 12:50:28 -07001376static int tcp_v6_rcv(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001378 const struct tcphdr *th;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001379 const struct ipv6hdr *hdr;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001380 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 struct sock *sk;
1382 int ret;
Pavel Emelyanova86b1e32008-07-16 20:20:58 -07001383 struct net *net = dev_net(skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384
1385 if (skb->pkt_type != PACKET_HOST)
1386 goto discard_it;
1387
1388 /*
1389 * Count it even if it's bad.
1390 */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001391 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392
1393 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1394 goto discard_it;
1395
Eric Dumazetea1627c2016-05-13 09:16:40 -07001396 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397
Eric Dumazetea1627c2016-05-13 09:16:40 -07001398 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 goto bad_packet;
1400 if (!pskb_may_pull(skb, th->doff*4))
1401 goto discard_it;
1402
Tom Herberte4f45b72014-05-02 16:29:51 -07001403 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001404 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
Eric Dumazetea1627c2016-05-13 09:16:40 -07001406 th = (const struct tcphdr *)skb->data;
Stephen Hemmingere802af92010-04-22 15:24:53 -07001407 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001409lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001410 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
Eric Dumazet3b24d852016-04-01 08:52:17 -07001411 th->source, th->dest, inet6_iif(skb),
1412 &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 if (!sk)
1414 goto no_tcp_socket;
1415
1416process:
1417 if (sk->sk_state == TCP_TIME_WAIT)
1418 goto do_time_wait;
1419
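	/* Request socket: the real listener is req->rsk_listener. Finish
	 * the handshake via tcp_check_req() and hand the skb to the new
	 * child, or drop the request on failure.
	 */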
Eric Dumazet079096f2015-10-02 11:43:32 -07001420 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1421 struct request_sock *req = inet_reqsk(sk);
Eric Dumazet77166822016-02-18 05:39:18 -08001422 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001423
1424 sk = req->rsk_listener;
1425 tcp_v6_fill_cb(skb, hdr, th);
1426 if (tcp_v6_inbound_md5_hash(sk, skb)) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001427 sk_drops_add(sk, skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001428 reqsk_put(req);
1429 goto discard_it;
1430 }
Eric Dumazet77166822016-02-18 05:39:18 -08001431 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001432 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001433 goto lookup;
1434 }
Eric Dumazet77166822016-02-18 05:39:18 -08001435 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001436 refcounted = true;
Eric Dumazet77166822016-02-18 05:39:18 -08001437 nsk = tcp_check_req(sk, skb, req, false);
Eric Dumazet079096f2015-10-02 11:43:32 -07001438 if (!nsk) {
1439 reqsk_put(req);
Eric Dumazet77166822016-02-18 05:39:18 -08001440 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001441 }
1442 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001443 reqsk_put(req);
1444 tcp_v6_restore_cb(skb);
1445 } else if (tcp_child_process(sk, nsk, skb)) {
1446 tcp_v6_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001447 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001448 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001449 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001450 return 0;
1451 }
1452 }
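	/* IPV6_MINHOPCOUNT: a GTSM-style check that drops packets whose
	 * hop limit is below the socket's configured minimum.
	 */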
Stephen Hemmingere802af92010-04-22 15:24:53 -07001453 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001454 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingere802af92010-04-22 15:24:53 -07001455 goto discard_and_relse;
1456 }
1457
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1459 goto discard_and_relse;
1460
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001461 tcp_v6_fill_cb(skb, hdr, th);
1462
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001463 if (tcp_v6_inbound_md5_hash(sk, skb))
1464 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001465
Eric Dumazetac6e7802016-11-10 13:12:35 -08001466 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001468 th = (const struct tcphdr *)skb->data;
1469 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470
1471 skb->dev = NULL;
1472
Eric Dumazete994b2f2015-10-02 11:43:39 -07001473 if (sk->sk_state == TCP_LISTEN) {
1474 ret = tcp_v6_do_rcv(sk, skb);
1475 goto put_and_return;
1476 }
1477
1478 sk_incoming_cpu_update(sk);
1479
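	/* Take the socket lock in BH context. If the owner already holds
	 * it, queue the skb to the backlog so it is processed when the
	 * lock is released; otherwise handle it now (via the prequeue or
	 * tcp_v6_do_rcv()).
	 */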
Fabio Olive Leite293b9c42006-09-25 22:28:47 -07001480 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001481 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 ret = 0;
1483 if (!sock_owned_by_user(sk)) {
Dan Williams7bced392013-12-30 12:37:29 -08001484 if (!tcp_prequeue(sk, skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001485 ret = tcp_v6_do_rcv(sk, skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001486 } else if (tcp_add_backlog(sk, skb)) {
Zhu Yi6b03a532010-03-04 18:01:41 +00001487 goto discard_and_relse;
1488 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 bh_unlock_sock(sk);
1490
Eric Dumazete994b2f2015-10-02 11:43:39 -07001491put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001492 if (refcounted)
1493 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 return ret ? -1 : 0;
1495
1496no_tcp_socket:
1497 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1498 goto discard_it;
1499
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001500 tcp_v6_fill_cb(skb, hdr, th);
1501
Eric Dumazet12e25e12015-06-03 23:49:21 -07001502 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001503csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001504 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001506 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001508 tcp_v6_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 }
1510
1511discard_it:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 kfree_skb(skb);
1513 return 0;
1514
1515discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001516 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001517 if (refcounted)
1518 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 goto discard_it;
1520
1521do_time_wait:
1522 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001523 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 goto discard_it;
1525 }
1526
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001527 tcp_v6_fill_cb(skb, hdr, th);
1528
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001529 if (tcp_checksum_complete(skb)) {
1530 inet_twsk_put(inet_twsk(sk));
1531 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 }
1533
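	/* TCP_TW_SYN: a new connection attempt hit this TIME_WAIT socket;
	 * if a matching listener exists, purge the timewait and restart
	 * processing on the listener.
	 */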
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001534 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 case TCP_TW_SYN:
1536 {
1537 struct sock *sk2;
1538
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001539 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
Craig Galleka5836362016-02-10 11:50:38 -05001540 skb, __tcp_hdrlen(th),
Tom Herbert5ba24952013-01-22 09:50:39 +00001541 &ipv6_hdr(skb)->saddr, th->source,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001542 &ipv6_hdr(skb)->daddr,
Eric Dumazet870c3152014-10-17 09:17:20 -07001543 ntohs(th->dest), tcp_v6_iif(skb));
Ian Morris53b24b82015-03-29 14:00:05 +01001544 if (sk2) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07001545 struct inet_timewait_sock *tw = inet_twsk(sk);
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001546 inet_twsk_deschedule_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 sk = sk2;
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001548 tcp_v6_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001549 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 goto process;
1551 }
1552 /* Fall through to ACK */
1553 }
1554 case TCP_TW_ACK:
1555 tcp_v6_timewait_ack(sk, skb);
1556 break;
1557 case TCP_TW_RST:
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001558 tcp_v6_restore_cb(skb);
Florian Westphal271c3b92015-12-21 21:29:26 +01001559 tcp_v6_send_reset(sk, skb);
1560 inet_twsk_deschedule_put(inet_twsk(sk));
1561 goto discard_it;
Wang Yufen4aa956d2014-03-29 09:27:29 +08001562 case TCP_TW_SUCCESS:
1563 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 }
1565 goto discard_it;
1566}
1567
Eric Dumazetc7109982012-07-26 12:18:11 +00001568static void tcp_v6_early_demux(struct sk_buff *skb)
1569{
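	/* Called from the IPv6 input path before routing: if this packet
	 * belongs to an established socket, attach the socket and its
	 * cached dst now so later lookups can be skipped.
	 */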
1570 const struct ipv6hdr *hdr;
1571 const struct tcphdr *th;
1572 struct sock *sk;
1573
1574 if (skb->pkt_type != PACKET_HOST)
1575 return;
1576
1577 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1578 return;
1579
1580 hdr = ipv6_hdr(skb);
1581 th = tcp_hdr(skb);
1582
1583 if (th->doff < sizeof(struct tcphdr) / 4)
1584 return;
1585
Eric Dumazet870c3152014-10-17 09:17:20 -07001586 /* Note: We use inet6_iif() here, not tcp_v6_iif() */
Eric Dumazetc7109982012-07-26 12:18:11 +00001587 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1588 &hdr->saddr, th->source,
1589 &hdr->daddr, ntohs(th->dest),
1590 inet6_iif(skb));
1591 if (sk) {
1592 skb->sk = sk;
1593 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001594 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001595 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Neal Cardwellf3f12132012-10-22 21:41:48 +00001596
Eric Dumazetc7109982012-07-26 12:18:11 +00001597 if (dst)
Eric Dumazet5d299f32012-08-06 05:09:33 +00001598 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
Eric Dumazetc7109982012-07-26 12:18:11 +00001599 if (dst &&
Neal Cardwellf3f12132012-10-22 21:41:48 +00001600 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
Eric Dumazetc7109982012-07-26 12:18:11 +00001601 skb_dst_set_noref(skb, dst);
1602 }
1603 }
1604}
1605
David S. Millerccb7c412010-12-01 18:09:13 -08001606static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1607 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1608 .twsk_unique = tcp_twsk_unique,
Wang Yufen4aa956d2014-03-29 09:27:29 +08001609 .twsk_destructor = tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001610};
1611
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001612static const struct inet_connection_sock_af_ops ipv6_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001613 .queue_xmit = inet6_csk_xmit,
1614 .send_check = tcp_v6_send_check,
1615 .rebuild_header = inet6_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001616 .sk_rx_dst_set = inet6_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001617 .conn_request = tcp_v6_conn_request,
1618 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001619 .net_header_len = sizeof(struct ipv6hdr),
Eric Dumazet67469602012-04-24 07:37:38 +00001620 .net_frag_header_len = sizeof(struct frag_hdr),
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001621 .setsockopt = ipv6_setsockopt,
1622 .getsockopt = ipv6_getsockopt,
1623 .addr2sockaddr = inet6_csk_addr2sockaddr,
1624 .sockaddr_len = sizeof(struct sockaddr_in6),
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001625 .bind_conflict = inet6_csk_bind_conflict,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001626#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001627 .compat_setsockopt = compat_ipv6_setsockopt,
1628 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001629#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001630 .mtu_reduced = tcp_v6_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631};
1632
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001633#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001634static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001635 .md5_lookup = tcp_v6_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001636 .calc_md5_hash = tcp_v6_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001637 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001638};
David S. Millera9286302006-11-14 19:53:22 -08001639#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001640
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641/*
1642 * TCP over IPv4 via INET6 API
1643 */
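/* These ops are installed on v4-mapped sockets (see tcp_v6_syn_recv_sock()
 * and tcp_v6_connect()): the socket keeps the IPv6 API and address format,
 * but transmits through the IPv4 stack.
 */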
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001644static const struct inet_connection_sock_af_ops ipv6_mapped = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001645 .queue_xmit = ip_queue_xmit,
1646 .send_check = tcp_v4_send_check,
1647 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet63d02d12012-08-09 14:11:00 +00001648 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001649 .conn_request = tcp_v6_conn_request,
1650 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001651 .net_header_len = sizeof(struct iphdr),
1652 .setsockopt = ipv6_setsockopt,
1653 .getsockopt = ipv6_getsockopt,
1654 .addr2sockaddr = inet6_csk_addr2sockaddr,
1655 .sockaddr_len = sizeof(struct sockaddr_in6),
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001656 .bind_conflict = inet6_csk_bind_conflict,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001657#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001658 .compat_setsockopt = compat_ipv6_setsockopt,
1659 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001660#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001661 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662};
1663
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001664#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001665static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001666 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001667 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001668 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001669};
David S. Millera9286302006-11-14 19:53:22 -08001670#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001671
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672/* NOTE: A lot of things are set to zero explicitly by the call to
 1673 * sk_alloc(), so they need not be done here.
 1674 */
1675static int tcp_v6_init_sock(struct sock *sk)
1676{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001677 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
Neal Cardwell900f65d2012-04-19 09:55:21 +00001679 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001681 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001683#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001684 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001685#endif
1686
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 return 0;
1688}
1689
Brian Haley7d06b2e2008-06-14 17:04:49 -07001690static void tcp_v6_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 tcp_v4_destroy_sock(sk);
Brian Haley7d06b2e2008-06-14 17:04:49 -07001693 inet6_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694}
1695
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001696#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697/* Proc filesystem TCPv6 sock list dumping. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001698static void get_openreq6(struct seq_file *seq,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001699 const struct request_sock *req, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700{
Eric Dumazetfa76ce732015-03-19 19:04:20 -07001701 long ttd = req->rsk_timer.expires - jiffies;
Eric Dumazet634fb9792013-10-09 15:21:29 -07001702 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1703 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
1705 if (ttd < 0)
1706 ttd = 0;
1707
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 seq_printf(seq,
1709 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001710 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 i,
1712 src->s6_addr32[0], src->s6_addr32[1],
1713 src->s6_addr32[2], src->s6_addr32[3],
Eric Dumazetb44084c2013-10-10 00:04:37 -07001714 inet_rsk(req)->ir_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 dest->s6_addr32[0], dest->s6_addr32[1],
1716 dest->s6_addr32[2], dest->s6_addr32[3],
Eric Dumazet634fb9792013-10-09 15:21:29 -07001717 ntohs(inet_rsk(req)->ir_rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 TCP_SYN_RECV,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001719 0, 0, /* could print option size, but that is af dependent. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001720 1, /* timers active (only the expire timer) */
1721 jiffies_to_clock_t(ttd),
Eric Dumazete6c022a2012-10-27 23:16:46 +00001722 req->num_timeout,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001723 from_kuid_munged(seq_user_ns(seq),
1724 sock_i_uid(req->rsk_listener)),
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001725 0, /* non standard timer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 0, /* open_requests have no inode */
1727 0, req);
1728}
1729
1730static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1731{
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001732 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 __u16 destp, srcp;
1734 int timer_active;
1735 unsigned long timer_expires;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001736 const struct inet_sock *inet = inet_sk(sp);
1737 const struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001738 const struct inet_connection_sock *icsk = inet_csk(sp);
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001739 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001740 int rx_queue;
1741 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742
Eric Dumazetefe42082013-10-03 15:42:29 -07001743 dest = &sp->sk_v6_daddr;
1744 src = &sp->sk_v6_rcv_saddr;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001745 destp = ntohs(inet->inet_dport);
1746 srcp = ntohs(inet->inet_sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001747
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001748 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1749 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
1750 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001752 timer_expires = icsk->icsk_timeout;
1753 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001755 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 } else if (timer_pending(&sp->sk_timer)) {
1757 timer_active = 2;
1758 timer_expires = sp->sk_timer.expires;
1759 } else {
1760 timer_active = 0;
1761 timer_expires = jiffies;
1762 }
1763
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001764 state = sk_state_load(sp);
1765 if (state == TCP_LISTEN)
1766 rx_queue = sp->sk_ack_backlog;
1767 else
1768 /* Because we don't lock the socket,
1769 * we might find a transient negative value.
1770 */
1771 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1772
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 seq_printf(seq,
1774 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001775 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 i,
1777 src->s6_addr32[0], src->s6_addr32[1],
1778 src->s6_addr32[2], src->s6_addr32[3], srcp,
1779 dest->s6_addr32[0], dest->s6_addr32[1],
1780 dest->s6_addr32[2], dest->s6_addr32[3], destp,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001781 state,
1782 tp->write_seq - tp->snd_una,
1783 rx_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 timer_active,
Eric Dumazeta399a802012-08-08 21:13:53 +00001785 jiffies_delta_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001786 icsk->icsk_retransmits,
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06001787 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001788 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 sock_i_ino(sp),
1790 atomic_read(&sp->sk_refcnt), sp,
Stephen Hemminger7be87352008-06-27 20:00:19 -07001791 jiffies_to_clock_t(icsk->icsk_rto),
1792 jiffies_to_clock_t(icsk->icsk_ack.ato),
Weilong Chen4c99aa42013-12-19 18:44:34 +08001793 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07001794 tp->snd_cwnd,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001795 state == TCP_LISTEN ?
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001796 fastopenq->max_qlen :
Yuchung Cheng0a672f72014-05-11 20:22:12 -07001797 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 );
1799}
1800
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001801static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001802 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803{
Eric Dumazet789f5582015-04-12 18:51:09 -07001804 long delta = tw->tw_timer.expires - jiffies;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001805 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 __u16 destp, srcp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807
Eric Dumazetefe42082013-10-03 15:42:29 -07001808 dest = &tw->tw_v6_daddr;
1809 src = &tw->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 destp = ntohs(tw->tw_dport);
1811 srcp = ntohs(tw->tw_sport);
1812
1813 seq_printf(seq,
1814 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Dan Rosenberg71338aa2011-05-23 12:17:35 +00001815 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 i,
1817 src->s6_addr32[0], src->s6_addr32[1],
1818 src->s6_addr32[2], src->s6_addr32[3], srcp,
1819 dest->s6_addr32[0], dest->s6_addr32[1],
1820 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1821 tw->tw_substate, 0, 0,
Eric Dumazeta399a802012-08-08 21:13:53 +00001822 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 atomic_read(&tw->tw_refcnt), tw);
1824}
1825
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826static int tcp6_seq_show(struct seq_file *seq, void *v)
1827{
1828 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07001829 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830
1831 if (v == SEQ_START_TOKEN) {
1832 seq_puts(seq,
1833 " sl "
1834 "local_address "
1835 "remote_address "
1836 "st tx_queue rx_queue tr tm->when retrnsmt"
1837 " uid timeout inode\n");
1838 goto out;
1839 }
1840 st = seq->private;
1841
Eric Dumazet079096f2015-10-02 11:43:32 -07001842 if (sk->sk_state == TCP_TIME_WAIT)
1843 get_timewait6_sock(seq, v, st->num);
1844 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001845 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07001846 else
1847 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848out:
1849 return 0;
1850}
1851
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001852static const struct file_operations tcp6_afinfo_seq_fops = {
1853 .owner = THIS_MODULE,
1854 .open = tcp_seq_open,
1855 .read = seq_read,
1856 .llseek = seq_lseek,
1857 .release = seq_release_net
1858};
1859
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 .name = "tcp6",
1862 .family = AF_INET6,
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001863 .seq_fops = &tcp6_afinfo_seq_fops,
Denis V. Lunev9427c4b2008-04-13 22:12:13 -07001864 .seq_ops = {
1865 .show = tcp6_seq_show,
1866 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867};
1868
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001869int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001871 return tcp_proc_register(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872}
1873
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001874void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001876 tcp_proc_unregister(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877}
1878#endif
1879
1880struct proto tcpv6_prot = {
1881 .name = "TCPv6",
1882 .owner = THIS_MODULE,
1883 .close = tcp_close,
1884 .connect = tcp_v6_connect,
1885 .disconnect = tcp_disconnect,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001886 .accept = inet_csk_accept,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 .ioctl = tcp_ioctl,
1888 .init = tcp_v6_init_sock,
1889 .destroy = tcp_v6_destroy_sock,
1890 .shutdown = tcp_shutdown,
1891 .setsockopt = tcp_setsockopt,
1892 .getsockopt = tcp_getsockopt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 .recvmsg = tcp_recvmsg,
Changli Gao7ba42912010-07-10 20:41:55 +00001894 .sendmsg = tcp_sendmsg,
1895 .sendpage = tcp_sendpage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 .backlog_rcv = tcp_v6_do_rcv,
Eric Dumazet46d3cea2012-07-11 05:50:31 +00001897 .release_cb = tcp_release_cb,
Craig Gallek496611d2016-02-10 11:50:36 -05001898 .hash = inet6_hash,
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001899 .unhash = inet_unhash,
1900 .get_port = inet_csk_get_port,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 .enter_memory_pressure = tcp_enter_memory_pressure,
Eric Dumazetc9bee3b72013-07-22 20:27:07 -07001902 .stream_memory_free = tcp_stream_memory_free,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 .sockets_allocated = &tcp_sockets_allocated,
1904 .memory_allocated = &tcp_memory_allocated,
1905 .memory_pressure = &tcp_memory_pressure,
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001906 .orphan_count = &tcp_orphan_count,
Eric W. Biedermana4fe34b2013-10-19 16:25:36 -07001907 .sysctl_mem = sysctl_tcp_mem,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 .sysctl_wmem = sysctl_tcp_wmem,
1909 .sysctl_rmem = sysctl_tcp_rmem,
1910 .max_header = MAX_TCP_HEADER,
1911 .obj_size = sizeof(struct tcp6_sock),
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08001912 .slab_flags = SLAB_DESTROY_BY_RCU,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08001913 .twsk_prot = &tcp6_timewait_sock_ops,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001914 .rsk_prot = &tcp6_request_sock_ops,
Pavel Emelyanov39d8cda2008-03-22 16:50:58 -07001915 .h.hashinfo = &tcp_hashinfo,
Changli Gao7ba42912010-07-10 20:41:55 +00001916 .no_autobind = true,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001917#ifdef CONFIG_COMPAT
1918 .compat_setsockopt = compat_tcp_setsockopt,
1919 .compat_getsockopt = compat_tcp_getsockopt,
1920#endif
Lorenzo Colittic1e64e22015-12-16 12:30:05 +09001921 .diag_destroy = tcp_abort,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922};
1923
Alexey Dobriyan41135cc2009-09-14 12:22:28 +00001924static const struct inet6_protocol tcpv6_protocol = {
Eric Dumazetc7109982012-07-26 12:18:11 +00001925 .early_demux = tcp_v6_early_demux,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 .handler = tcp_v6_rcv,
1927 .err_handler = tcp_v6_err,
1928 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1929};
1930
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931static struct inet_protosw tcpv6_protosw = {
1932 .type = SOCK_STREAM,
1933 .protocol = IPPROTO_TCP,
1934 .prot = &tcpv6_prot,
1935 .ops = &inet6_stream_ops,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001936 .flags = INET_PROTOSW_PERMANENT |
1937 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938};
1939
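/* Per-netns setup: net->ipv6.tcp_sk is the control socket used to send
 * RSTs and ACKs on behalf of sockets we do not own.
 */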
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001940static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001941{
Denis V. Lunev56772422008-04-03 14:28:30 -07001942 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1943 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001944}
1945
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001946static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001947{
Denis V. Lunev56772422008-04-03 14:28:30 -07001948 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001949}
1950
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001951static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001952{
1953 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001954}
1955
1956static struct pernet_operations tcpv6_net_ops = {
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001957 .init = tcpv6_net_init,
1958 .exit = tcpv6_net_exit,
1959 .exit_batch = tcpv6_net_exit_batch,
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001960};
1961
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001962int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001964 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08001965
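	/* Register in order: inet6 protocol handler, then the protosw
	 * entry, then the per-netns ops; errors unwind in reverse.
	 */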
Vlad Yasevich33362882012-11-15 08:49:15 +00001966 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1967 if (ret)
Vlad Yasevichc6b641a2012-11-15 08:49:22 +00001968 goto out;
Vlad Yasevich33362882012-11-15 08:49:15 +00001969
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001970 /* register inet6 protocol */
1971 ret = inet6_register_protosw(&tcpv6_protosw);
1972 if (ret)
1973 goto out_tcpv6_protocol;
1974
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001975 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001976 if (ret)
1977 goto out_tcpv6_protosw;
1978out:
1979 return ret;
1980
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001981out_tcpv6_protosw:
1982 inet6_unregister_protosw(&tcpv6_protosw);
Vlad Yasevich33362882012-11-15 08:49:15 +00001983out_tcpv6_protocol:
1984 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001985 goto out;
1986}
1987
Daniel Lezcano09f77092007-12-13 05:34:58 -08001988void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001989{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001990 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001991 inet6_unregister_protosw(&tcpv6_protosw);
1992 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993}