/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

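/* Cache the incoming route on the socket for the early-demux fast path.
 * The dst is kept only if a reference can be taken on it, and the route
 * cookie is stored so a stale cached route can be detected later.
 */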
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

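/* Derive the initial sequence number for a new connection from the IPv6
 * address/port 4-tuple of the incoming segment; the timestamp offset is
 * returned through @tsoff.
 */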
static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source, tsoff);
}

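/* Active open: validate the destination, resolve flow label and scope,
 * hand v4-mapped destinations over to tcp_v4_connect(), otherwise route
 * the flow, pick a source address, hash the socket into the established
 * table and send the SYN (or defer it for TCP Fast Open).
 */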
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row->sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport,
							     &tp->tsoffset);

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

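/* Apply a previously recorded Packet Too Big indication (tp->mtu_info):
 * refresh the cached route and, if the path MTU shrank below the cached
 * value, lower the MSS and retransmit what is in flight.
 */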
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

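/* ICMPv6 error handler for TCP: look up the socket the error refers to
 * and act on redirects, Packet Too Big messages and hard errors, taking
 * care of request sockets, Fast Open children and user-locked sockets.
 */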
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


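/* Build and send a SYN-ACK for @req, routing it first if no dst was
 * supplied, and carrying the request's IPv6 options and flow label.
 */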
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

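/* TCP MD5 signature (RFC 2385) support: key management through the
 * TCP_MD5SIG socket option plus helpers that hash the IPv6 pseudo-header,
 * the TCP header and, for full segments, the payload.
 */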
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

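/* Check the TCP MD5 signature option of an incoming segment against the
 * key configured for the peer; returns true when the segment must be
 * dropped (missing, unexpected or mismatching signature).
 */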
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};

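/* Common helper used by the RST and ACK paths below: build a bare TCP
 * control segment (optionally with timestamps and an MD5 option), route
 * it and transmit it on the per-namespace control socket.
 */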
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even when it is for a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

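/* Send a RST in reply to @skb unless that segment is itself a RST.
 * With CONFIG_TCP_MD5SIG the reply is signed with the matching socket's
 * key or, failing that, with a key found via the listening socket after
 * the incoming signature has been verified.
 */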
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the hash of the key
		 * we find, and no RST is generated if the hashes don't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}


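/* With SYN cookies enabled, a non-SYN segment reaching a listener may be
 * the ACK that completes a cookie handshake; cookie_v6_check() validates
 * it and, if valid, creates the child socket.
 */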
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

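/* Create the child socket once the handshake completes: v6-mapped (IPv4)
 * requests are delegated to tcp_v4_syn_recv_sock() and then converted to
 * look like IPv6 sockets, while native IPv6 requests get a routed dst and
 * their IPv6 state copied from the listener and the request.
 */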
Eric Dumazet0c271712015-09-29 07:42:48 -07001009static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001010 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001011 struct dst_entry *dst,
1012 struct request_sock *req_unhash,
1013 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014{
Eric Dumazet634fb9792013-10-09 15:21:29 -07001015 struct inet_request_sock *ireq;
Eric Dumazet0c271712015-09-29 07:42:48 -07001016 struct ipv6_pinfo *newnp;
1017 const struct ipv6_pinfo *np = inet6_sk(sk);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001018 struct ipv6_txoptions *opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001019 struct tcp6_sock *newtcp6sk;
1020 struct inet_sock *newinet;
1021 struct tcp_sock *newtp;
1022 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001023#ifdef CONFIG_TCP_MD5SIG
1024 struct tcp_md5sig_key *key;
1025#endif
Neal Cardwell3840a062012-06-28 12:34:19 +00001026 struct flowi6 fl6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027
1028 if (skb->protocol == htons(ETH_P_IP)) {
1029 /*
1030 * v6 mapped
1031 */
1032
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001033 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1034 req_unhash, own_req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035
Ian Morris63159f22015-03-29 14:00:04 +01001036 if (!newsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037 return NULL;
1038
1039 newtcp6sk = (struct tcp6_sock *)newsk;
1040 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1041
1042 newinet = inet_sk(newsk);
1043 newnp = inet6_sk(newsk);
1044 newtp = tcp_sk(newsk);
1045
1046 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1047
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001048 newnp->saddr = newsk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001049
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001050 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001052#ifdef CONFIG_TCP_MD5SIG
1053 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1054#endif
1055
Yan, Zheng676a1182011-09-25 02:21:30 +00001056 newnp->ipv6_ac_list = NULL;
1057 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001058 newnp->pktoptions = NULL;
1059 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001060 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001061 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001062 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001063 if (np->repflow)
1064 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001065
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001066 /*
1067 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1068 * here, tcp_create_openreq_child now does this for us, see the comment in
1069 * that function for the gory details. -acme
Linus Torvalds1da177e2005-04-16 15:20:36 -07001070 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071
		/* This is a tricky place. Until this moment the IPv4 tcp code
		 * worked with the IPv6 icsk.icsk_af_ops.
		 * Sync it now.
		 */
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001076 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077
1078 return newsk;
1079 }
1080
Eric Dumazet634fb9792013-10-09 15:21:29 -07001081 ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082
1083 if (sk_acceptq_is_full(sk))
1084 goto out_overflow;
1085
David S. Miller493f3772010-12-02 12:14:29 -08001086 if (!dst) {
Eric Dumazetf76b33c2015-09-29 07:42:42 -07001087 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
David S. Miller493f3772010-12-02 12:14:29 -08001088 if (!dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 goto out;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001090 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091
1092 newsk = tcp_create_openreq_child(sk, req, skb);
Ian Morris63159f22015-03-29 14:00:04 +01001093 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001094 goto out_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001096 /*
1097 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1098 * count here, tcp_create_openreq_child now does this for us, see the
1099 * comment in that function for the gory details. -acme
1100 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101
Stephen Hemminger59eed272006-08-25 15:55:43 -07001102 newsk->sk_gso_type = SKB_GSO_TCPV6;
Eric Dumazet6bd4f352015-12-02 21:53:57 -08001103 ip6_dst_store(newsk, dst, NULL, NULL);
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001104 inet6_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105
1106 newtcp6sk = (struct tcp6_sock *)newsk;
1107 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1108
1109 newtp = tcp_sk(newsk);
1110 newinet = inet_sk(newsk);
1111 newnp = inet6_sk(newsk);
1112
1113 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1114
Eric Dumazet634fb9792013-10-09 15:21:29 -07001115 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1116 newnp->saddr = ireq->ir_v6_loc_addr;
1117 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1118 newsk->sk_bound_dev_if = ireq->ir_iif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119
	/* Now IPv6 options...
	 *
	 * First: no IPv4 options.
	 */
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001124 newinet->inet_opt = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001125 newnp->ipv6_ac_list = NULL;
Masayuki Nakagawad35690b2007-03-16 16:14:03 -07001126 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127
1128 /* Clone RX bits */
1129 newnp->rxopt.all = np->rxopt.all;
1130
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131 newnp->pktoptions = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001133 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001134 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001135 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001136 if (np->repflow)
1137 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138
	/* Clone native IPv6 options from the listening socket (if any).
	 *
	 * Yes, keeping a reference count would be much more clever, but we
	 * do one more thing here: reattach optmem to newsk.
	 */
Huw Davies56ac42b2016-06-27 15:05:28 -04001145 opt = ireq->ipv6_opt;
1146 if (!opt)
1147 opt = rcu_dereference(np->opt);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001148 if (opt) {
1149 opt = ipv6_dup_options(newsk, opt);
1150 RCU_INIT_POINTER(newnp->opt, opt);
1151 }
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001152 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001153 if (opt)
1154 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1155 opt->opt_flen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156
Daniel Borkmann81164412015-01-05 23:57:48 +01001157 tcp_ca_openreq_child(newsk, dst);
1158
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001160 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Neal Cardwelld135c522012-04-22 09:45:47 +00001161
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 tcp_initialize_rcv_mss(newsk);
1163
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001164 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1165 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001167#ifdef CONFIG_TCP_MD5SIG
1168 /* Copy over the MD5 key from the original socket */
Wang Yufen4aa956d2014-03-29 09:27:29 +08001169 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
Ian Morris53b24b82015-03-29 14:00:05 +01001170 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001171 /* We're using one, so create a matching key
1172 * on the newsk structure. If we fail to get
1173 * memory, then we end up not copying the key
1174 * across. Shucks.
1175 */
Eric Dumazetefe42082013-10-03 15:42:29 -07001176 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
Mel Gorman99a1dec2012-07-31 16:44:14 -07001177 AF_INET6, key->key, key->keylen,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001178 sk_gfp_mask(sk, GFP_ATOMIC));
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001179 }
1180#endif
1181
Balazs Scheidler093d2822010-10-21 13:06:43 +02001182 if (__inet_inherit_port(sk, newsk) < 0) {
Christoph Paasche337e242012-12-14 04:07:58 +00001183 inet_csk_prepare_forced_close(newsk);
1184 tcp_done(newsk);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001185 goto out;
1186 }
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001187 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001188 if (*own_req) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001189 tcp_move_syn(newtp, req);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001190
1191 /* Clone pktoptions received with SYN, if we own the req */
1192 if (ireq->pktopts) {
1193 newnp->pktoptions = skb_clone(ireq->pktopts,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001194 sk_gfp_mask(sk, GFP_ATOMIC));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001195 consume_skb(ireq->pktopts);
1196 ireq->pktopts = NULL;
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001197 if (newnp->pktoptions) {
1198 tcp_v6_restore_cb(newnp->pktoptions);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001199 skb_set_owner_r(newnp->pktoptions, newsk);
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001200 }
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001201 }
Eric Dumazetce105002015-10-30 09:46:12 -07001202 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203
1204 return newsk;
1205
1206out_overflow:
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001207 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001208out_nonewsk:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209 dst_release(dst);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001210out:
Eric Dumazet9caad862016-04-01 08:52:20 -07001211 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 return NULL;
1213}
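/* Illustrative user-space sketch (not part of this file; the port number and
 * addresses are arbitrary, and error handling is omitted): the ETH_P_IP
 * branch above is what lets one AF_INET6 listener accept plain IPv4 clients
 * as v4-mapped sockets. With <sys/socket.h>, <netinet/in.h> and
 * <arpa/inet.h>:
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	int off = 0;
 *	struct sockaddr_in6 addr = { .sin6_family = AF_INET6,
 *				     .sin6_addr   = IN6ADDR_ANY_INIT,
 *				     .sin6_port   = htons(8080) };
 *	struct sockaddr_in6 peer;
 *	socklen_t plen = sizeof(peer);
 *	char buf[INET6_ADDRSTRLEN];
 *
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 16);
 *	int cfd = accept(fd, (struct sockaddr *)&peer, &plen);
 *	inet_ntop(AF_INET6, &peer.sin6_addr, buf, sizeof(buf));
 *
 * For an IPv4 client, buf then reads as a mapped address such as
 * "::ffff:192.0.2.1", while the kernel side of the accepted child keeps
 * using the ipv6_mapped ops installed above.
 */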
1214
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1223static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1224{
1225 struct ipv6_pinfo *np = inet6_sk(sk);
1226 struct tcp_sock *tp;
1227 struct sk_buff *opt_skb = NULL;
1228
	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
	 * goes to the IPv4 receive handler and gets backlogged.
	 * From the backlog it always ends up here. Kerboom...
	 * Fortunately, tcp_rcv_established and rcv_established
	 * handle it correctly, but that is not the case with
	 * tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */
1236
1237 if (skb->protocol == htons(ETH_P_IP))
1238 return tcp_v4_do_rcv(sk, skb);
1239
Eric Dumazetac6e7802016-11-10 13:12:35 -08001240 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 goto discard;
1242
1243 /*
1244 * socket locking is here for SMP purposes as backlog rcv
1245 * is currently called with bh processing disabled.
1246 */
1247
	/* Do Stevens' IPV6_PKTOPTIONS.
	 *
	 * Yes, guys, this is the only place in our code where we
	 * can do it without affecting IPv4.
	 * The rest of the code is protocol independent,
	 * and I do not like the idea of uglifying IPv4.
	 *
	 * Actually, the whole idea behind IPV6_PKTOPTIONS
	 * does not look very well thought out. For now we latch
	 * the options received in the last packet enqueued
	 * by tcp. Feel free to propose a better solution.
	 * --ANK (980728)
	 *
	 * (A user-space sketch of the socket options involved follows this
	 * function.)
	 */
1261 if (np->rxopt.all)
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001262 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263
1264 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet5d299f32012-08-06 05:09:33 +00001265 struct dst_entry *dst = sk->sk_rx_dst;
1266
Tom Herbertbdeab992011-08-14 19:45:55 +00001267 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001268 sk_mark_napi_id(sk, skb);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001269 if (dst) {
1270 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1271 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1272 dst_release(dst);
1273 sk->sk_rx_dst = NULL;
1274 }
1275 }
1276
Vijay Subramanianc995ae22013-09-03 12:23:22 -07001277 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 if (opt_skb)
1279 goto ipv6_pktoptions;
1280 return 0;
1281 }
1282
Eric Dumazet12e25e12015-06-03 23:49:21 -07001283 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 goto csum_err;
1285
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001286 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001287 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1288
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 if (!nsk)
1290 goto discard;
1291
Weilong Chen4c99aa42013-12-19 18:44:34 +08001292 if (nsk != sk) {
Tom Herbertbdeab992011-08-14 19:45:55 +00001293 sock_rps_save_rxhash(nsk, skb);
Eric Dumazet38cb5242015-10-02 11:43:26 -07001294 sk_mark_napi_id(nsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 if (tcp_child_process(sk, nsk, skb))
1296 goto reset;
1297 if (opt_skb)
1298 __kfree_skb(opt_skb);
1299 return 0;
1300 }
Neil Horman47482f12011-04-06 13:07:09 -07001301 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001302 sock_rps_save_rxhash(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001304 if (tcp_rcv_state_process(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 goto reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306 if (opt_skb)
1307 goto ipv6_pktoptions;
1308 return 0;
1309
1310reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001311 tcp_v6_send_reset(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312discard:
1313 if (opt_skb)
1314 __kfree_skb(opt_skb);
1315 kfree_skb(skb);
1316 return 0;
1317csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001318 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1319 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 goto discard;
1321
1322
1323ipv6_pktoptions:
	/* What do these checks mean?
	 *
	 * 1. skb was enqueued by tcp.
	 * 2. skb is added to the tail of the read queue, rather than out of order.
	 * 3. The socket is not in a passive state.
	 * 4. Finally, it really contains options which the user wants to receive.
	 */
1331 tp = tcp_sk(sk);
1332 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1333 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001334 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
Eric Dumazet870c3152014-10-17 09:17:20 -07001335 np->mcast_oif = tcp_v6_iif(opt_skb);
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001336 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001337 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
Florent Fourcot82e9f102013-12-08 15:46:59 +01001338 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
Florent Fourcot1397ed32013-12-08 15:46:57 +01001339 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001340 if (np->repflow)
1341 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
Eric Dumazeta2247722014-09-27 09:50:56 -07001342 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 skb_set_owner_r(opt_skb, sk);
Eric Dumazet8ce48622016-10-12 19:01:45 +02001344 tcp_v6_restore_cb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345 opt_skb = xchg(&np->pktoptions, opt_skb);
1346 } else {
1347 __kfree_skb(opt_skb);
1348 opt_skb = xchg(&np->pktoptions, NULL);
1349 }
1350 }
1351
Wei Yongjun800d55f2009-02-23 21:45:33 +00001352 kfree_skb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353 return 0;
1354}
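/* Illustrative user-space sketch (not part of this file): the rxopt bits
 * tested above get set by the usual RFC 3542 socket options, e.g.
 *
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO,  &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));
 *
 * Once any such bit is set, np->rxopt.all is non-zero, the segment is cloned
 * into opt_skb and latched into np->pktoptions above. Unlike UDP, TCP does
 * not hand these out as recvmsg() ancillary data; the latched skb appears to
 * be consumed through the legacy getsockopt(IPV6_2292PKTOPTIONS) path
 * (compare the ip6_datagram_recv_specific_ctl() note in tcp_v6_restore_cb()).
 */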
1355
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001356static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1357 const struct tcphdr *th)
1358{
	/* This is tricky: we move IP6CB to its correct location inside
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure the compiler won't play aliasing games.
	 */
1364 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1365 sizeof(struct inet6_skb_parm));
1366 barrier();
1367
1368 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1369 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1370 skb->len - th->doff*4);
1371 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1372 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1373 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1374 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1375 TCP_SKB_CB(skb)->sacked = 0;
1376}
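/* Worked example for the end_seq arithmetic above (illustrative numbers):
 * for a segment with seq = 1000, doff = 8 (32 bytes of TCP header),
 * skb->len = 132 and FIN set, the payload is 132 - 32 = 100 bytes, so
 * end_seq = 1000 + 0 (syn) + 1 (fin) + 100 = 1101; SYN and FIN each
 * consume one unit of sequence space.
 */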
1377
Herbert Xue5bbef22007-10-15 12:50:28 -07001378static int tcp_v6_rcv(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001380 const struct tcphdr *th;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001381 const struct ipv6hdr *hdr;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001382 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 struct sock *sk;
1384 int ret;
Pavel Emelyanova86b1e32008-07-16 20:20:58 -07001385 struct net *net = dev_net(skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386
1387 if (skb->pkt_type != PACKET_HOST)
1388 goto discard_it;
1389
1390 /*
1391 * Count it even if it's bad.
1392 */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001393 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394
1395 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1396 goto discard_it;
1397
Eric Dumazetea1627c2016-05-13 09:16:40 -07001398 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399
Eric Dumazetea1627c2016-05-13 09:16:40 -07001400 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 goto bad_packet;
1402 if (!pskb_may_pull(skb, th->doff*4))
1403 goto discard_it;
1404
Tom Herberte4f45b72014-05-02 16:29:51 -07001405 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001406 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407
Eric Dumazetea1627c2016-05-13 09:16:40 -07001408 th = (const struct tcphdr *)skb->data;
Stephen Hemmingere802af92010-04-22 15:24:53 -07001409 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001411lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001412 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
Eric Dumazet3b24d852016-04-01 08:52:17 -07001413 th->source, th->dest, inet6_iif(skb),
1414 &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 if (!sk)
1416 goto no_tcp_socket;
1417
1418process:
1419 if (sk->sk_state == TCP_TIME_WAIT)
1420 goto do_time_wait;
1421
Eric Dumazet079096f2015-10-02 11:43:32 -07001422 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1423 struct request_sock *req = inet_reqsk(sk);
Eric Dumazet77166822016-02-18 05:39:18 -08001424 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001425
1426 sk = req->rsk_listener;
1427 tcp_v6_fill_cb(skb, hdr, th);
1428 if (tcp_v6_inbound_md5_hash(sk, skb)) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001429 sk_drops_add(sk, skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001430 reqsk_put(req);
1431 goto discard_it;
1432 }
Eric Dumazet77166822016-02-18 05:39:18 -08001433 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001434 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001435 goto lookup;
1436 }
Eric Dumazet77166822016-02-18 05:39:18 -08001437 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001438 refcounted = true;
Eric Dumazet77166822016-02-18 05:39:18 -08001439 nsk = tcp_check_req(sk, skb, req, false);
Eric Dumazet079096f2015-10-02 11:43:32 -07001440 if (!nsk) {
1441 reqsk_put(req);
Eric Dumazet77166822016-02-18 05:39:18 -08001442 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001443 }
1444 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001445 reqsk_put(req);
1446 tcp_v6_restore_cb(skb);
1447 } else if (tcp_child_process(sk, nsk, skb)) {
1448 tcp_v6_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001449 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001450 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001451 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001452 return 0;
1453 }
1454 }
Stephen Hemmingere802af92010-04-22 15:24:53 -07001455 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001456 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingere802af92010-04-22 15:24:53 -07001457 goto discard_and_relse;
1458 }
1459
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1461 goto discard_and_relse;
1462
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001463 tcp_v6_fill_cb(skb, hdr, th);
1464
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001465 if (tcp_v6_inbound_md5_hash(sk, skb))
1466 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001467
Eric Dumazetac6e7802016-11-10 13:12:35 -08001468 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001470 th = (const struct tcphdr *)skb->data;
1471 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472
1473 skb->dev = NULL;
1474
Eric Dumazete994b2f2015-10-02 11:43:39 -07001475 if (sk->sk_state == TCP_LISTEN) {
1476 ret = tcp_v6_do_rcv(sk, skb);
1477 goto put_and_return;
1478 }
1479
1480 sk_incoming_cpu_update(sk);
1481
Fabio Olive Leite293b9c42006-09-25 22:28:47 -07001482 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001483 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 ret = 0;
1485 if (!sock_owned_by_user(sk)) {
Dan Williams7bced392013-12-30 12:37:29 -08001486 if (!tcp_prequeue(sk, skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001487 ret = tcp_v6_do_rcv(sk, skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001488 } else if (tcp_add_backlog(sk, skb)) {
Zhu Yi6b03a532010-03-04 18:01:41 +00001489 goto discard_and_relse;
1490 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 bh_unlock_sock(sk);
1492
Eric Dumazete994b2f2015-10-02 11:43:39 -07001493put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001494 if (refcounted)
1495 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 return ret ? -1 : 0;
1497
1498no_tcp_socket:
1499 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1500 goto discard_it;
1501
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001502 tcp_v6_fill_cb(skb, hdr, th);
1503
Eric Dumazet12e25e12015-06-03 23:49:21 -07001504 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001505csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001506 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001508 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001510 tcp_v6_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 }
1512
1513discard_it:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 kfree_skb(skb);
1515 return 0;
1516
1517discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001518 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001519 if (refcounted)
1520 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 goto discard_it;
1522
1523do_time_wait:
1524 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001525 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 goto discard_it;
1527 }
1528
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001529 tcp_v6_fill_cb(skb, hdr, th);
1530
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001531 if (tcp_checksum_complete(skb)) {
1532 inet_twsk_put(inet_twsk(sk));
1533 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 }
1535
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001536 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 case TCP_TW_SYN:
1538 {
1539 struct sock *sk2;
1540
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001541 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
Craig Galleka5836362016-02-10 11:50:38 -05001542 skb, __tcp_hdrlen(th),
Tom Herbert5ba24952013-01-22 09:50:39 +00001543 &ipv6_hdr(skb)->saddr, th->source,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001544 &ipv6_hdr(skb)->daddr,
Eric Dumazet870c3152014-10-17 09:17:20 -07001545 ntohs(th->dest), tcp_v6_iif(skb));
Ian Morris53b24b82015-03-29 14:00:05 +01001546 if (sk2) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07001547 struct inet_timewait_sock *tw = inet_twsk(sk);
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001548 inet_twsk_deschedule_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 sk = sk2;
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001550 tcp_v6_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001551 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 goto process;
1553 }
1554 /* Fall through to ACK */
1555 }
1556 case TCP_TW_ACK:
1557 tcp_v6_timewait_ack(sk, skb);
1558 break;
1559 case TCP_TW_RST:
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001560 tcp_v6_restore_cb(skb);
Florian Westphal271c3b92015-12-21 21:29:26 +01001561 tcp_v6_send_reset(sk, skb);
1562 inet_twsk_deschedule_put(inet_twsk(sk));
1563 goto discard_it;
Wang Yufen4aa956d2014-03-29 09:27:29 +08001564 case TCP_TW_SUCCESS:
1565 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 }
1567 goto discard_it;
1568}
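/* Receive path summary (descriptive only): after the header and checksum
 * checks the segment is matched to a socket. TCP_NEW_SYN_RECV request
 * sockets are checked against MD5 and passed to tcp_check_req() under their
 * listener, TCP_TIME_WAIT sockets branch to do_time_wait, and everything
 * else is either processed immediately under the BH lock, put on the
 * prequeue, or appended to the backlog when the socket is owned by
 * user space.
 */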
1569
Eric Dumazetc7109982012-07-26 12:18:11 +00001570static void tcp_v6_early_demux(struct sk_buff *skb)
1571{
1572 const struct ipv6hdr *hdr;
1573 const struct tcphdr *th;
1574 struct sock *sk;
1575
1576 if (skb->pkt_type != PACKET_HOST)
1577 return;
1578
1579 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1580 return;
1581
1582 hdr = ipv6_hdr(skb);
1583 th = tcp_hdr(skb);
1584
1585 if (th->doff < sizeof(struct tcphdr) / 4)
1586 return;
1587
	/* Note: we use inet6_iif() here, not tcp_v6_iif() */
Eric Dumazetc7109982012-07-26 12:18:11 +00001589 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1590 &hdr->saddr, th->source,
1591 &hdr->daddr, ntohs(th->dest),
1592 inet6_iif(skb));
1593 if (sk) {
1594 skb->sk = sk;
1595 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001596 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001597 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Neal Cardwellf3f12132012-10-22 21:41:48 +00001598
Eric Dumazetc7109982012-07-26 12:18:11 +00001599 if (dst)
Eric Dumazet5d299f32012-08-06 05:09:33 +00001600 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
Eric Dumazetc7109982012-07-26 12:18:11 +00001601 if (dst &&
Neal Cardwellf3f12132012-10-22 21:41:48 +00001602 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
Eric Dumazetc7109982012-07-26 12:18:11 +00001603 skb_dst_set_noref(skb, dst);
1604 }
1605 }
1606}
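/* Why early demux helps (descriptive only): stashing the established socket
 * in skb->sk (with sock_edemux as destructor) lets the later lookup in
 * tcp_v6_rcv() pick it up directly instead of repeating the hash walk, and
 * the cached, still-valid rx dst set via skb_dst_set_noref() lets the input
 * path skip a fresh routing lookup for this skb.
 */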
1607
David S. Millerccb7c412010-12-01 18:09:13 -08001608static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1609 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1610 .twsk_unique = tcp_twsk_unique,
Wang Yufen4aa956d2014-03-29 09:27:29 +08001611 .twsk_destructor = tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001612};
1613
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001614static const struct inet_connection_sock_af_ops ipv6_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001615 .queue_xmit = inet6_csk_xmit,
1616 .send_check = tcp_v6_send_check,
1617 .rebuild_header = inet6_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001618 .sk_rx_dst_set = inet6_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001619 .conn_request = tcp_v6_conn_request,
1620 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001621 .net_header_len = sizeof(struct ipv6hdr),
Eric Dumazet67469602012-04-24 07:37:38 +00001622 .net_frag_header_len = sizeof(struct frag_hdr),
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001623 .setsockopt = ipv6_setsockopt,
1624 .getsockopt = ipv6_getsockopt,
1625 .addr2sockaddr = inet6_csk_addr2sockaddr,
1626 .sockaddr_len = sizeof(struct sockaddr_in6),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001627#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001628 .compat_setsockopt = compat_ipv6_setsockopt,
1629 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001630#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001631 .mtu_reduced = tcp_v6_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632};
1633
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001634#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001635static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001636 .md5_lookup = tcp_v6_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001637 .calc_md5_hash = tcp_v6_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001638 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001639};
David S. Millera9286302006-11-14 19:53:22 -08001640#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001641
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642/*
1643 * TCP over IPv4 via INET6 API
1644 */
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001645static const struct inet_connection_sock_af_ops ipv6_mapped = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001646 .queue_xmit = ip_queue_xmit,
1647 .send_check = tcp_v4_send_check,
1648 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet63d02d12012-08-09 14:11:00 +00001649 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001650 .conn_request = tcp_v6_conn_request,
1651 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001652 .net_header_len = sizeof(struct iphdr),
1653 .setsockopt = ipv6_setsockopt,
1654 .getsockopt = ipv6_getsockopt,
1655 .addr2sockaddr = inet6_csk_addr2sockaddr,
1656 .sockaddr_len = sizeof(struct sockaddr_in6),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001657#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001658 .compat_setsockopt = compat_ipv6_setsockopt,
1659 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001660#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001661 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662};
1663
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001664#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001665static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001666 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001667 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001668 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001669};
David S. Millera9286302006-11-14 19:53:22 -08001670#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001671
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
1675static int tcp_v6_init_sock(struct sock *sk)
1676{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001677 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
Neal Cardwell900f65d2012-04-19 09:55:21 +00001679 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001681 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001683#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001684 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001685#endif
1686
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 return 0;
1688}
1689
Brian Haley7d06b2e2008-06-14 17:04:49 -07001690static void tcp_v6_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 tcp_v4_destroy_sock(sk);
Brian Haley7d06b2e2008-06-14 17:04:49 -07001693 inet6_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694}
1695
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001696#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697/* Proc filesystem TCPv6 sock list dumping. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001698static void get_openreq6(struct seq_file *seq,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001699 const struct request_sock *req, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700{
Eric Dumazetfa76ce732015-03-19 19:04:20 -07001701 long ttd = req->rsk_timer.expires - jiffies;
Eric Dumazet634fb9792013-10-09 15:21:29 -07001702 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1703 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
1705 if (ttd < 0)
1706 ttd = 0;
1707
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 seq_printf(seq,
1709 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001710 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 i,
1712 src->s6_addr32[0], src->s6_addr32[1],
1713 src->s6_addr32[2], src->s6_addr32[3],
Eric Dumazetb44084c2013-10-10 00:04:37 -07001714 inet_rsk(req)->ir_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 dest->s6_addr32[0], dest->s6_addr32[1],
1716 dest->s6_addr32[2], dest->s6_addr32[3],
Eric Dumazet634fb9792013-10-09 15:21:29 -07001717 ntohs(inet_rsk(req)->ir_rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 TCP_SYN_RECV,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001719 0, 0, /* could print option size, but that is af dependent. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001720 1, /* timers active (only the expire timer) */
1721 jiffies_to_clock_t(ttd),
Eric Dumazete6c022a2012-10-27 23:16:46 +00001722 req->num_timeout,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001723 from_kuid_munged(seq_user_ns(seq),
1724 sock_i_uid(req->rsk_listener)),
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001725 0, /* non standard timer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 0, /* open_requests have no inode */
1727 0, req);
1728}
1729
1730static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1731{
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001732 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 __u16 destp, srcp;
1734 int timer_active;
1735 unsigned long timer_expires;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001736 const struct inet_sock *inet = inet_sk(sp);
1737 const struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001738 const struct inet_connection_sock *icsk = inet_csk(sp);
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001739 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001740 int rx_queue;
1741 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742
Eric Dumazetefe42082013-10-03 15:42:29 -07001743 dest = &sp->sk_v6_daddr;
1744 src = &sp->sk_v6_rcv_saddr;
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001745 destp = ntohs(inet->inet_dport);
1746 srcp = ntohs(inet->inet_sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001747
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001748 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
Yuchung Cheng57dde7f2017-01-12 22:11:33 -08001749 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001750 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001752 timer_expires = icsk->icsk_timeout;
1753 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001755 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 } else if (timer_pending(&sp->sk_timer)) {
1757 timer_active = 2;
1758 timer_expires = sp->sk_timer.expires;
1759 } else {
1760 timer_active = 0;
1761 timer_expires = jiffies;
1762 }
1763
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001764 state = sk_state_load(sp);
1765 if (state == TCP_LISTEN)
1766 rx_queue = sp->sk_ack_backlog;
1767 else
1768 /* Because we don't lock the socket,
1769 * we might find a transient negative value.
1770 */
1771 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1772
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 seq_printf(seq,
1774 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001775 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 i,
1777 src->s6_addr32[0], src->s6_addr32[1],
1778 src->s6_addr32[2], src->s6_addr32[3], srcp,
1779 dest->s6_addr32[0], dest->s6_addr32[1],
1780 dest->s6_addr32[2], dest->s6_addr32[3], destp,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001781 state,
1782 tp->write_seq - tp->snd_una,
1783 rx_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 timer_active,
Eric Dumazeta399a802012-08-08 21:13:53 +00001785 jiffies_delta_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001786 icsk->icsk_retransmits,
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06001787 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001788 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 sock_i_ino(sp),
1790 atomic_read(&sp->sk_refcnt), sp,
Stephen Hemminger7be87352008-06-27 20:00:19 -07001791 jiffies_to_clock_t(icsk->icsk_rto),
1792 jiffies_to_clock_t(icsk->icsk_ack.ato),
Weilong Chen4c99aa42013-12-19 18:44:34 +08001793 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07001794 tp->snd_cwnd,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001795 state == TCP_LISTEN ?
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001796 fastopenq->max_qlen :
Yuchung Cheng0a672f72014-05-11 20:22:12 -07001797 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 );
1799}
1800
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001801static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001802 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803{
Eric Dumazet789f5582015-04-12 18:51:09 -07001804 long delta = tw->tw_timer.expires - jiffies;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001805 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 __u16 destp, srcp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807
Eric Dumazetefe42082013-10-03 15:42:29 -07001808 dest = &tw->tw_v6_daddr;
1809 src = &tw->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 destp = ntohs(tw->tw_dport);
1811 srcp = ntohs(tw->tw_sport);
1812
1813 seq_printf(seq,
1814 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Dan Rosenberg71338aa2011-05-23 12:17:35 +00001815 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 i,
1817 src->s6_addr32[0], src->s6_addr32[1],
1818 src->s6_addr32[2], src->s6_addr32[3], srcp,
1819 dest->s6_addr32[0], dest->s6_addr32[1],
1820 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1821 tw->tw_substate, 0, 0,
Eric Dumazeta399a802012-08-08 21:13:53 +00001822 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 atomic_read(&tw->tw_refcnt), tw);
1824}
1825
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826static int tcp6_seq_show(struct seq_file *seq, void *v)
1827{
1828 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07001829 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830
1831 if (v == SEQ_START_TOKEN) {
1832 seq_puts(seq,
1833 " sl "
1834 "local_address "
1835 "remote_address "
1836 "st tx_queue rx_queue tr tm->when retrnsmt"
1837 " uid timeout inode\n");
1838 goto out;
1839 }
1840 st = seq->private;
1841
Eric Dumazet079096f2015-10-02 11:43:32 -07001842 if (sk->sk_state == TCP_TIME_WAIT)
1843 get_timewait6_sock(seq, v, st->num);
1844 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001845 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07001846 else
1847 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848out:
1849 return 0;
1850}
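/* Illustrative user-space sketch (not part of this file): the address:port
 * columns printed above are the four s6_addr32 words, each as %08X in host
 * byte order, followed by a hex port. Assuming a little-endian machine, ::1
 * port 22 appears as "00000000000000000000000001000000:0016" and can be
 * parsed back with <stdio.h>, <string.h>, <netinet/in.h> and <arpa/inet.h>:
 *
 *	const char *field = "00000000000000000000000001000000:0016";
 *	struct in6_addr a;
 *	unsigned int w[4], port;
 *	char buf[INET6_ADDRSTRLEN];
 *
 *	sscanf(field, "%8x%8x%8x%8x:%4x", &w[0], &w[1], &w[2], &w[3], &port);
 *	memcpy(&a, w, sizeof(a));
 *	inet_ntop(AF_INET6, &a, buf, sizeof(buf));
 *
 * buf then holds "::1" and port is 22; the hex grouping is only meaningful
 * when parsed on a machine with the same endianness as the kernel that
 * wrote it.
 */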
1851
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001852static const struct file_operations tcp6_afinfo_seq_fops = {
1853 .owner = THIS_MODULE,
1854 .open = tcp_seq_open,
1855 .read = seq_read,
1856 .llseek = seq_lseek,
1857 .release = seq_release_net
1858};
1859
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 .name = "tcp6",
1862 .family = AF_INET6,
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001863 .seq_fops = &tcp6_afinfo_seq_fops,
Denis V. Lunev9427c4b2008-04-13 22:12:13 -07001864 .seq_ops = {
1865 .show = tcp6_seq_show,
1866 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867};
1868
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001869int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001871 return tcp_proc_register(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872}
1873
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001874void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001876 tcp_proc_unregister(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877}
1878#endif
1879
1880struct proto tcpv6_prot = {
1881 .name = "TCPv6",
1882 .owner = THIS_MODULE,
1883 .close = tcp_close,
1884 .connect = tcp_v6_connect,
1885 .disconnect = tcp_disconnect,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001886 .accept = inet_csk_accept,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 .ioctl = tcp_ioctl,
1888 .init = tcp_v6_init_sock,
1889 .destroy = tcp_v6_destroy_sock,
1890 .shutdown = tcp_shutdown,
1891 .setsockopt = tcp_setsockopt,
1892 .getsockopt = tcp_getsockopt,
Ursula Braun4b9d07a2017-01-09 16:55:12 +01001893 .keepalive = tcp_set_keepalive,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 .recvmsg = tcp_recvmsg,
Changli Gao7ba42912010-07-10 20:41:55 +00001895 .sendmsg = tcp_sendmsg,
1896 .sendpage = tcp_sendpage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 .backlog_rcv = tcp_v6_do_rcv,
Eric Dumazet46d3cea2012-07-11 05:50:31 +00001898 .release_cb = tcp_release_cb,
Craig Gallek496611d2016-02-10 11:50:36 -05001899 .hash = inet6_hash,
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001900 .unhash = inet_unhash,
1901 .get_port = inet_csk_get_port,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 .enter_memory_pressure = tcp_enter_memory_pressure,
Eric Dumazetc9bee3b72013-07-22 20:27:07 -07001903 .stream_memory_free = tcp_stream_memory_free,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 .sockets_allocated = &tcp_sockets_allocated,
1905 .memory_allocated = &tcp_memory_allocated,
1906 .memory_pressure = &tcp_memory_pressure,
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001907 .orphan_count = &tcp_orphan_count,
Eric W. Biedermana4fe34b2013-10-19 16:25:36 -07001908 .sysctl_mem = sysctl_tcp_mem,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909 .sysctl_wmem = sysctl_tcp_wmem,
1910 .sysctl_rmem = sysctl_tcp_rmem,
1911 .max_header = MAX_TCP_HEADER,
1912 .obj_size = sizeof(struct tcp6_sock),
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08001913 .slab_flags = SLAB_DESTROY_BY_RCU,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08001914 .twsk_prot = &tcp6_timewait_sock_ops,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001915 .rsk_prot = &tcp6_request_sock_ops,
Pavel Emelyanov39d8cda2008-03-22 16:50:58 -07001916 .h.hashinfo = &tcp_hashinfo,
Changli Gao7ba42912010-07-10 20:41:55 +00001917 .no_autobind = true,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001918#ifdef CONFIG_COMPAT
1919 .compat_setsockopt = compat_tcp_setsockopt,
1920 .compat_getsockopt = compat_tcp_getsockopt,
1921#endif
Lorenzo Colittic1e64e22015-12-16 12:30:05 +09001922 .diag_destroy = tcp_abort,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923};
1924
Alexey Dobriyan41135cc2009-09-14 12:22:28 +00001925static const struct inet6_protocol tcpv6_protocol = {
Eric Dumazetc7109982012-07-26 12:18:11 +00001926 .early_demux = tcp_v6_early_demux,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 .handler = tcp_v6_rcv,
1928 .err_handler = tcp_v6_err,
1929 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1930};
1931
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932static struct inet_protosw tcpv6_protosw = {
1933 .type = SOCK_STREAM,
1934 .protocol = IPPROTO_TCP,
1935 .prot = &tcpv6_prot,
1936 .ops = &inet6_stream_ops,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001937 .flags = INET_PROTOSW_PERMANENT |
1938 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939};
1940
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001941static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001942{
Denis V. Lunev56772422008-04-03 14:28:30 -07001943 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1944 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001945}
1946
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001947static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001948{
Denis V. Lunev56772422008-04-03 14:28:30 -07001949 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001950}
1951
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001952static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001953{
Haishuang Yan1946e672016-12-28 17:52:32 +08001954 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001955}
1956
1957static struct pernet_operations tcpv6_net_ops = {
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001958 .init = tcpv6_net_init,
1959 .exit = tcpv6_net_exit,
1960 .exit_batch = tcpv6_net_exit_batch,
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001961};
1962
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001963int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001965 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08001966
Vlad Yasevich33362882012-11-15 08:49:15 +00001967 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1968 if (ret)
Vlad Yasevichc6b641a2012-11-15 08:49:22 +00001969 goto out;
Vlad Yasevich33362882012-11-15 08:49:15 +00001970
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001971 /* register inet6 protocol */
1972 ret = inet6_register_protosw(&tcpv6_protosw);
1973 if (ret)
1974 goto out_tcpv6_protocol;
1975
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001976 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001977 if (ret)
1978 goto out_tcpv6_protosw;
1979out:
1980 return ret;
1981
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001982out_tcpv6_protosw:
1983 inet6_unregister_protosw(&tcpv6_protosw);
Vlad Yasevich33362882012-11-15 08:49:15 +00001984out_tcpv6_protocol:
1985 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001986 goto out;
1987}
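/* Registration order note: tcpv6_init() adds the inet6 protocol handler,
 * then the protosw, then the pernet ops; the error labels unwind in the
 * reverse order, and tcpv6_exit() below tears things down in that same
 * reverse order.
 */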
1988
Daniel Lezcano09f77092007-12-13 05:34:58 -08001989void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001990{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001991 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001992 inet6_unregister_protosw(&tcpv6_protosw);
1993 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994}