/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

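/* Cache the received packet's dst entry on the socket, together with the
 * incoming ifindex and a route cookie used later to revalidate the cached
 * route.
 */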
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

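/* Pick the initial sequence number for a new connection from a keyed hash
 * of the {saddr, daddr, sport, dport} tuple (secure_tcpv6_sequence_number),
 * so ISNs are not easily predictable.
 */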
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

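/* React to an ICMPV6_PKT_TOOBIG notification (possibly deferred from
 * tcp_v6_err()): update the path MTU from tp->mtu_info and, if the current
 * MSS estimate is now too large, shrink it and retransmit.
 */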
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

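/* Hash the first part of the TCP-MD5 (RFC 2385) signature input: the IPv6
 * pseudo-header followed by a copy of the TCP header with its checksum field
 * zeroed. Callers feed the payload and the key into the transform separately.
 */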
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};

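/* Build and transmit a bare (payload-free) RST or ACK segment on the
 * per-netns TCP control socket; used for resets and for ACKs sent on behalf
 * of time-wait and request sockets.
 */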
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for an RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not loosen security here:
		 * the incoming packet is checked against the md5 hash of the
		 * key we find, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
			0, 0);
}


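/* With syncookies, the ACK that completes the handshake carries the cookie
 * instead of matching a queued request; cookie_v6_check() validates it and,
 * when it is good, rebuilds the connection from the encoded state.
 */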
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet_iif(skb);
		newnp->mcast_hops = ip_hdr(skb)->ttl;
		newnp->rcv_flowinfo = 0;
		if (np->repflow)
			newnp->flow_label = 0;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001081 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082
1083 return newsk;
1084 }
1085
Eric Dumazet634fb9792013-10-09 15:21:29 -07001086 ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087
1088 if (sk_acceptq_is_full(sk))
1089 goto out_overflow;
1090
David S. Miller493f3772010-12-02 12:14:29 -08001091 if (!dst) {
Eric Dumazetf76b33c2015-09-29 07:42:42 -07001092 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
David S. Miller493f3772010-12-02 12:14:29 -08001093 if (!dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094 goto out;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001095 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001096
1097 newsk = tcp_create_openreq_child(sk, req, skb);
Ian Morris63159f22015-03-29 14:00:04 +01001098 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001099 goto out_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001100
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001101 /*
1102 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1103 * count here, tcp_create_openreq_child now does this for us, see the
1104 * comment in that function for the gory details. -acme
1105 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106
Stephen Hemminger59eed272006-08-25 15:55:43 -07001107 newsk->sk_gso_type = SKB_GSO_TCPV6;
Eric Dumazet6bd4f352015-12-02 21:53:57 -08001108 ip6_dst_store(newsk, dst, NULL, NULL);
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001109 inet6_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110
1111 newtcp6sk = (struct tcp6_sock *)newsk;
1112 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1113
1114 newtp = tcp_sk(newsk);
1115 newinet = inet_sk(newsk);
1116 newnp = inet6_sk(newsk);
1117
1118 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1119
Eric Dumazet634fb9792013-10-09 15:21:29 -07001120 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1121 newnp->saddr = ireq->ir_v6_loc_addr;
1122 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1123 newsk->sk_bound_dev_if = ireq->ir_iif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001125 /* Now IPv6 options...
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126
1127 First: no IPv4 options.
1128 */
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001129 newinet->inet_opt = NULL;
WANG Cong4bd8f5e2017-05-09 16:59:54 -07001130 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001131 newnp->ipv6_ac_list = NULL;
Masayuki Nakagawad35690b2007-03-16 16:14:03 -07001132 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133
1134 /* Clone RX bits */
1135 newnp->rxopt.all = np->rxopt.all;
1136
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137 newnp->pktoptions = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001139 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001140 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001141 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001142 if (np->repflow)
1143 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144
1145 /* Clone native IPv6 options from listening socket (if any)
1146
	 1147	   Yes, keeping a reference count would be much cleverer,
	 1148	   but we do one more thing here: reattach optmem
	 1149	   to newsk.
1150 */
Huw Davies56ac42b2016-06-27 15:05:28 -04001151 opt = ireq->ipv6_opt;
1152 if (!opt)
1153 opt = rcu_dereference(np->opt);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001154 if (opt) {
1155 opt = ipv6_dup_options(newsk, opt);
1156 RCU_INIT_POINTER(newnp->opt, opt);
1157 }
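	/* Account for the extension-header bytes in icsk_ext_hdr_len so that
	 * the MSS computed by tcp_sync_mss() below leaves room for them.
	 */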
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001158 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001159 if (opt)
1160 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1161 opt->opt_flen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162
Daniel Borkmann81164412015-01-05 23:57:48 +01001163 tcp_ca_openreq_child(newsk, dst);
1164
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165 tcp_sync_mss(newsk, dst_mtu(dst));
David S. Miller0dbaee32010-12-13 12:52:14 -08001166 newtp->advmss = dst_metric_advmss(dst);
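	/* Honour a smaller MSS cap configured on the listener via TCP_MAXSEG
	 * (rx_opt.user_mss), if any.
	 */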
Neal Cardwelld135c522012-04-22 09:45:47 +00001167 if (tcp_sk(sk)->rx_opt.user_mss &&
1168 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1169 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1170
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171 tcp_initialize_rcv_mss(newsk);
1172
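	/* Pure IPv6 socket: stamp the unused IPv4 address fields with the
	 * 127.0.0.6 marker (LOOPBACK4_IPV6) rather than leaving them zero.
	 */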
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001173 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1174 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001176#ifdef CONFIG_TCP_MD5SIG
1177 /* Copy over the MD5 key from the original socket */
Wang Yufen4aa956d2014-03-29 09:27:29 +08001178 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
Ian Morris53b24b82015-03-29 14:00:05 +01001179 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001180 /* We're using one, so create a matching key
1181 * on the newsk structure. If we fail to get
1182 * memory, then we end up not copying the key
1183 * across. Shucks.
1184 */
Eric Dumazetefe42082013-10-03 15:42:29 -07001185 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
Mel Gorman99a1dec2012-07-31 16:44:14 -07001186 AF_INET6, key->key, key->keylen,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001187 sk_gfp_mask(sk, GFP_ATOMIC));
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001188 }
1189#endif
1190
Balazs Scheidler093d2822010-10-21 13:06:43 +02001191 if (__inet_inherit_port(sk, newsk) < 0) {
Christoph Paasche337e242012-12-14 04:07:58 +00001192 inet_csk_prepare_forced_close(newsk);
1193 tcp_done(newsk);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001194 goto out;
1195 }
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001196 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001197 if (*own_req) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001198 tcp_move_syn(newtp, req);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001199
1200 /* Clone pktoptions received with SYN, if we own the req */
1201 if (ireq->pktopts) {
1202 newnp->pktoptions = skb_clone(ireq->pktopts,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001203 sk_gfp_mask(sk, GFP_ATOMIC));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001204 consume_skb(ireq->pktopts);
1205 ireq->pktopts = NULL;
Eric Dumazet1e340bb2017-02-05 20:23:22 -08001206 if (newnp->pktoptions) {
1207 tcp_v6_restore_cb(newnp->pktoptions);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001208 skb_set_owner_r(newnp->pktoptions, newsk);
Eric Dumazet1e340bb2017-02-05 20:23:22 -08001209 }
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001210 }
Eric Dumazetce105002015-10-30 09:46:12 -07001211 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212
1213 return newsk;
1214
1215out_overflow:
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001216 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001217out_nonewsk:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218 dst_release(dst);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001219out:
Eric Dumazet9caad862016-04-01 08:52:20 -07001220 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 return NULL;
1222}
1223
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224/* The socket must have its spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001225 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 *
1227 * We have a potential double-lock case here, so even when
1228 * doing backlog processing we use the BH locking scheme.
1229 * This is because we cannot sleep with the original spinlock
1230 * held.
1231 */
1232static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1233{
1234 struct ipv6_pinfo *np = inet6_sk(sk);
1235 struct tcp_sock *tp;
1236 struct sk_buff *opt_skb = NULL;
1237
	 1238	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
	 1239	   goes to the IPv4 receive handler and gets backlogged.
	 1240	   From the backlog it always ends up here. Kerboom...
	 1241	   Fortunately, tcp_rcv_established and rcv_established
	 1242	   handle them correctly, but that is not the case with
	 1243	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1244 */
1245
1246 if (skb->protocol == htons(ETH_P_IP))
1247 return tcp_v4_do_rcv(sk, skb);
1248
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 /*
1250 * socket locking is here for SMP purposes as backlog rcv
1251 * is currently called with bh processing disabled.
1252 */
1253
1254 /* Do Stevens' IPV6_PKTOPTIONS.
1255
	 1256	   Yes, this is the only place in our code where we
	 1257	   can handle it without affecting IPv4.
	 1258	   The rest of the code is protocol independent,
	 1259	   and I do not like the idea of uglifying IPv4.
	 1260	
	 1261	   Actually, the whole idea behind IPV6_PKTOPTIONS
	 1262	   does not look very well thought out. For now we latch
	 1263	   the options received in the last packet enqueued
	 1264	   by tcp. Feel free to propose a better solution.
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001265 --ANK (980728)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 */
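	/* Illustrative userspace sketch (not part of this file; assumes the
	 * standard IPV6_RECV* socket options): the application opts in with
	 *
	 *	int on = 1;
	 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
	 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));
	 *
	 * which sets the np->rxopt bits checked below; the options latched
	 * from the most recent segment are then typically read back with the
	 * RFC 2292 style getsockopt(IPV6_2292PKTOPTIONS) on a TCP socket.
	 */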
1267 if (np->rxopt.all)
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001268 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269
1270 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet5d299f32012-08-06 05:09:33 +00001271 struct dst_entry *dst = sk->sk_rx_dst;
1272
Tom Herbertbdeab992011-08-14 19:45:55 +00001273 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001274 sk_mark_napi_id(sk, skb);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001275 if (dst) {
1276 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1277 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1278 dst_release(dst);
1279 sk->sk_rx_dst = NULL;
1280 }
1281 }
1282
Vijay Subramanianc995ae22013-09-03 12:23:22 -07001283 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 if (opt_skb)
1285 goto ipv6_pktoptions;
1286 return 0;
1287 }
1288
Eric Dumazet12e25e12015-06-03 23:49:21 -07001289 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 goto csum_err;
1291
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001292 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001293 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1294
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 if (!nsk)
1296 goto discard;
1297
Weilong Chen4c99aa42013-12-19 18:44:34 +08001298 if (nsk != sk) {
Tom Herbertbdeab992011-08-14 19:45:55 +00001299 sock_rps_save_rxhash(nsk, skb);
Eric Dumazet38cb5242015-10-02 11:43:26 -07001300 sk_mark_napi_id(nsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 if (tcp_child_process(sk, nsk, skb))
1302 goto reset;
1303 if (opt_skb)
1304 __kfree_skb(opt_skb);
1305 return 0;
1306 }
Neil Horman47482f12011-04-06 13:07:09 -07001307 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001308 sock_rps_save_rxhash(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001310 if (tcp_rcv_state_process(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 goto reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 if (opt_skb)
1313 goto ipv6_pktoptions;
1314 return 0;
1315
1316reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001317 tcp_v6_send_reset(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318discard:
1319 if (opt_skb)
1320 __kfree_skb(opt_skb);
1321 kfree_skb(skb);
1322 return 0;
1323csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001324 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1325 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 goto discard;
1327
1328
1329ipv6_pktoptions:
	 1330	/* What is this? We only get here when:
	 1331	
	 1332	   1. the skb was enqueued by tcp,
	 1333	   2. the skb was added to the tail of the read queue, not out of order,
	 1334	   3. the socket is not in a passive state, and
	 1335	   4. it really contains options which the user wants to receive.
1336 */
1337 tp = tcp_sk(sk);
1338 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1339 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001340 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
Eric Dumazet870c3152014-10-17 09:17:20 -07001341 np->mcast_oif = tcp_v6_iif(opt_skb);
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001342 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001343 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
Florent Fourcot82e9f102013-12-08 15:46:59 +01001344 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
Florent Fourcot1397ed32013-12-08 15:46:57 +01001345 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001346 if (np->repflow)
1347 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
Eric Dumazeta2247722014-09-27 09:50:56 -07001348 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 skb_set_owner_r(opt_skb, sk);
Eric Dumazet8ce48622016-10-12 19:01:45 +02001350 tcp_v6_restore_cb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 opt_skb = xchg(&np->pktoptions, opt_skb);
1352 } else {
1353 __kfree_skb(opt_skb);
1354 opt_skb = xchg(&np->pktoptions, NULL);
1355 }
1356 }
1357
Wei Yongjun800d55f2009-02-23 21:45:33 +00001358 kfree_skb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359 return 0;
1360}
1361
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001362static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1363 const struct tcphdr *th)
1364{
	 1365	/* This is tricky: we move IP6CB to its correct location inside
	 1366	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 1367	 * _decode_session6() uses IP6CB().
	 1368	 * barrier() makes sure the compiler won't play aliasing games.
1369 */
1370 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1371 sizeof(struct inet6_skb_parm));
1372 barrier();
1373
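	/* Sequence-space accounting: SYN and FIN each consume one sequence
	 * number in addition to the payload bytes (th->doff * 4 is the TCP
	 * header length in bytes).
	 */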
1374 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1375 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1376 skb->len - th->doff*4);
1377 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1378 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1379 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1380 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1381 TCP_SKB_CB(skb)->sacked = 0;
1382}
1383
Herbert Xue5bbef22007-10-15 12:50:28 -07001384static int tcp_v6_rcv(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001386 const struct tcphdr *th;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001387 const struct ipv6hdr *hdr;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001388 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 struct sock *sk;
1390 int ret;
Pavel Emelyanova86b1e32008-07-16 20:20:58 -07001391 struct net *net = dev_net(skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392
1393 if (skb->pkt_type != PACKET_HOST)
1394 goto discard_it;
1395
1396 /*
1397 * Count it even if it's bad.
1398 */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001399 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400
1401 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1402 goto discard_it;
1403
Eric Dumazetea1627c2016-05-13 09:16:40 -07001404 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
Eric Dumazetea1627c2016-05-13 09:16:40 -07001406 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 goto bad_packet;
1408 if (!pskb_may_pull(skb, th->doff*4))
1409 goto discard_it;
1410
Tom Herberte4f45b72014-05-02 16:29:51 -07001411 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001412 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413
Eric Dumazetea1627c2016-05-13 09:16:40 -07001414 th = (const struct tcphdr *)skb->data;
Stephen Hemmingere802af92010-04-22 15:24:53 -07001415 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001417lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001418 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
Eric Dumazet3b24d852016-04-01 08:52:17 -07001419 th->source, th->dest, inet6_iif(skb),
1420 &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 if (!sk)
1422 goto no_tcp_socket;
1423
1424process:
1425 if (sk->sk_state == TCP_TIME_WAIT)
1426 goto do_time_wait;
1427
Eric Dumazet079096f2015-10-02 11:43:32 -07001428 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1429 struct request_sock *req = inet_reqsk(sk);
Eric Dumazet77166822016-02-18 05:39:18 -08001430 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001431
1432 sk = req->rsk_listener;
1433 tcp_v6_fill_cb(skb, hdr, th);
1434 if (tcp_v6_inbound_md5_hash(sk, skb)) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001435 sk_drops_add(sk, skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001436 reqsk_put(req);
1437 goto discard_it;
1438 }
Frank van der Linden6caca342018-06-12 23:09:37 +00001439 if (tcp_checksum_complete(skb)) {
1440 reqsk_put(req);
1441 goto csum_error;
1442 }
Eric Dumazet77166822016-02-18 05:39:18 -08001443 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001444 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001445 goto lookup;
1446 }
Eric Dumazet77166822016-02-18 05:39:18 -08001447 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001448 refcounted = true;
Eric Dumazet9c3804b2017-09-08 12:44:47 -07001449 nsk = NULL;
1450 if (!tcp_filter(sk, skb))
1451 nsk = tcp_check_req(sk, skb, req, false);
Eric Dumazet079096f2015-10-02 11:43:32 -07001452 if (!nsk) {
1453 reqsk_put(req);
Eric Dumazet77166822016-02-18 05:39:18 -08001454 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001455 }
1456 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001457 reqsk_put(req);
1458 tcp_v6_restore_cb(skb);
1459 } else if (tcp_child_process(sk, nsk, skb)) {
1460 tcp_v6_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001461 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001462 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001463 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001464 return 0;
1465 }
1466 }
Stephen Hemmingere802af92010-04-22 15:24:53 -07001467 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001468 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingere802af92010-04-22 15:24:53 -07001469 goto discard_and_relse;
1470 }
1471
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1473 goto discard_and_relse;
1474
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001475 tcp_v6_fill_cb(skb, hdr, th);
1476
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001477 if (tcp_v6_inbound_md5_hash(sk, skb))
1478 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001479
Eric Dumazetac6e7802016-11-10 13:12:35 -08001480 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001482 th = (const struct tcphdr *)skb->data;
1483 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
1485 skb->dev = NULL;
1486
Eric Dumazete994b2f2015-10-02 11:43:39 -07001487 if (sk->sk_state == TCP_LISTEN) {
1488 ret = tcp_v6_do_rcv(sk, skb);
1489 goto put_and_return;
1490 }
1491
1492 sk_incoming_cpu_update(sk);
1493
Fabio Olive Leite293b9c42006-09-25 22:28:47 -07001494 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001495 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 ret = 0;
1497 if (!sock_owned_by_user(sk)) {
Dan Williams7bced392013-12-30 12:37:29 -08001498 if (!tcp_prequeue(sk, skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001499 ret = tcp_v6_do_rcv(sk, skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001500 } else if (tcp_add_backlog(sk, skb)) {
Zhu Yi6b03a532010-03-04 18:01:41 +00001501 goto discard_and_relse;
1502 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 bh_unlock_sock(sk);
1504
Eric Dumazete994b2f2015-10-02 11:43:39 -07001505put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001506 if (refcounted)
1507 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 return ret ? -1 : 0;
1509
1510no_tcp_socket:
1511 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1512 goto discard_it;
1513
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001514 tcp_v6_fill_cb(skb, hdr, th);
1515
Eric Dumazet12e25e12015-06-03 23:49:21 -07001516 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001517csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001518 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001520 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001522 tcp_v6_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 }
1524
1525discard_it:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 kfree_skb(skb);
1527 return 0;
1528
1529discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001530 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001531 if (refcounted)
1532 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 goto discard_it;
1534
1535do_time_wait:
1536 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001537 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 goto discard_it;
1539 }
1540
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001541 tcp_v6_fill_cb(skb, hdr, th);
1542
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001543 if (tcp_checksum_complete(skb)) {
1544 inet_twsk_put(inet_twsk(sk));
1545 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 }
1547
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001548 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 case TCP_TW_SYN:
1550 {
1551 struct sock *sk2;
1552
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001553 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
Craig Galleka5836362016-02-10 11:50:38 -05001554 skb, __tcp_hdrlen(th),
Tom Herbert5ba24952013-01-22 09:50:39 +00001555 &ipv6_hdr(skb)->saddr, th->source,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001556 &ipv6_hdr(skb)->daddr,
Eric Dumazet870c3152014-10-17 09:17:20 -07001557 ntohs(th->dest), tcp_v6_iif(skb));
Ian Morris53b24b82015-03-29 14:00:05 +01001558 if (sk2) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07001559 struct inet_timewait_sock *tw = inet_twsk(sk);
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001560 inet_twsk_deschedule_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 sk = sk2;
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001562 tcp_v6_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001563 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 goto process;
1565 }
1566 /* Fall through to ACK */
1567 }
1568 case TCP_TW_ACK:
1569 tcp_v6_timewait_ack(sk, skb);
1570 break;
1571 case TCP_TW_RST:
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001572 tcp_v6_restore_cb(skb);
Florian Westphal271c3b92015-12-21 21:29:26 +01001573 tcp_v6_send_reset(sk, skb);
1574 inet_twsk_deschedule_put(inet_twsk(sk));
1575 goto discard_it;
Wang Yufen4aa956d2014-03-29 09:27:29 +08001576 case TCP_TW_SUCCESS:
1577 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 }
1579 goto discard_it;
1580}
1581
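/* Early demux: called early in the IPv6 receive path, before the routing
 * decision. For an established flow it looks the socket up by source and
 * destination address and port, attaches it to the skb and, when the cached
 * rx dst is still valid for the incoming interface, reuses it so the
 * per-packet route lookup can be skipped.
 */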
Eric Dumazetc7109982012-07-26 12:18:11 +00001582static void tcp_v6_early_demux(struct sk_buff *skb)
1583{
1584 const struct ipv6hdr *hdr;
1585 const struct tcphdr *th;
1586 struct sock *sk;
1587
1588 if (skb->pkt_type != PACKET_HOST)
1589 return;
1590
1591 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1592 return;
1593
1594 hdr = ipv6_hdr(skb);
1595 th = tcp_hdr(skb);
1596
1597 if (th->doff < sizeof(struct tcphdr) / 4)
1598 return;
1599
Eric Dumazet870c3152014-10-17 09:17:20 -07001600 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
Eric Dumazetc7109982012-07-26 12:18:11 +00001601 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1602 &hdr->saddr, th->source,
1603 &hdr->daddr, ntohs(th->dest),
1604 inet6_iif(skb));
1605 if (sk) {
1606 skb->sk = sk;
1607 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001608 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001609 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Neal Cardwellf3f12132012-10-22 21:41:48 +00001610
Eric Dumazetc7109982012-07-26 12:18:11 +00001611 if (dst)
Eric Dumazet5d299f32012-08-06 05:09:33 +00001612 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
Eric Dumazetc7109982012-07-26 12:18:11 +00001613 if (dst &&
Neal Cardwellf3f12132012-10-22 21:41:48 +00001614 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
Eric Dumazetc7109982012-07-26 12:18:11 +00001615 skb_dst_set_noref(skb, dst);
1616 }
1617 }
1618}
1619
David S. Millerccb7c412010-12-01 18:09:13 -08001620static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1621 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1622 .twsk_unique = tcp_twsk_unique,
Wang Yufen4aa956d2014-03-29 09:27:29 +08001623 .twsk_destructor = tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001624};
1625
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001626static const struct inet_connection_sock_af_ops ipv6_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001627 .queue_xmit = inet6_csk_xmit,
1628 .send_check = tcp_v6_send_check,
1629 .rebuild_header = inet6_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001630 .sk_rx_dst_set = inet6_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001631 .conn_request = tcp_v6_conn_request,
1632 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001633 .net_header_len = sizeof(struct ipv6hdr),
Eric Dumazet67469602012-04-24 07:37:38 +00001634 .net_frag_header_len = sizeof(struct frag_hdr),
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001635 .setsockopt = ipv6_setsockopt,
1636 .getsockopt = ipv6_getsockopt,
1637 .addr2sockaddr = inet6_csk_addr2sockaddr,
1638 .sockaddr_len = sizeof(struct sockaddr_in6),
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001639 .bind_conflict = inet6_csk_bind_conflict,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001640#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001641 .compat_setsockopt = compat_ipv6_setsockopt,
1642 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001643#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001644 .mtu_reduced = tcp_v6_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645};
1646
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001647#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001648static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001649 .md5_lookup = tcp_v6_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001650 .calc_md5_hash = tcp_v6_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001651 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001652};
David S. Millera9286302006-11-14 19:53:22 -08001653#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001654
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655/*
1656 * TCP over IPv4 via INET6 API
1657 */
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001658static const struct inet_connection_sock_af_ops ipv6_mapped = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001659 .queue_xmit = ip_queue_xmit,
1660 .send_check = tcp_v4_send_check,
1661 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet63d02d12012-08-09 14:11:00 +00001662 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001663 .conn_request = tcp_v6_conn_request,
1664 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001665 .net_header_len = sizeof(struct iphdr),
1666 .setsockopt = ipv6_setsockopt,
1667 .getsockopt = ipv6_getsockopt,
1668 .addr2sockaddr = inet6_csk_addr2sockaddr,
1669 .sockaddr_len = sizeof(struct sockaddr_in6),
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001670 .bind_conflict = inet6_csk_bind_conflict,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001671#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001672 .compat_setsockopt = compat_ipv6_setsockopt,
1673 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001674#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001675 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676};
1677
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001678#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001679static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001680 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001681 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001682 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001683};
David S. Millera9286302006-11-14 19:53:22 -08001684#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001685
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686/* NOTE: A lot of things are set to zero explicitly by the call to
	 1687	 *       sk_alloc(), so they need not be done here.
1688 */
1689static int tcp_v6_init_sock(struct sock *sk)
1690{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001691 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692
Neal Cardwell900f65d2012-04-19 09:55:21 +00001693 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001695 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001697#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001698 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001699#endif
1700
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 return 0;
1702}
1703
Brian Haley7d06b2e2008-06-14 17:04:49 -07001704static void tcp_v6_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 tcp_v4_destroy_sock(sk);
Brian Haley7d06b2e2008-06-14 17:04:49 -07001707 inet6_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708}
1709
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001710#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711/* Proc filesystem TCPv6 sock list dumping. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001712static void get_openreq6(struct seq_file *seq,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001713 const struct request_sock *req, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714{
Eric Dumazetfa76ce732015-03-19 19:04:20 -07001715 long ttd = req->rsk_timer.expires - jiffies;
Eric Dumazet634fb9792013-10-09 15:21:29 -07001716 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1717 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
1719 if (ttd < 0)
1720 ttd = 0;
1721
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 seq_printf(seq,
1723 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001724 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 i,
1726 src->s6_addr32[0], src->s6_addr32[1],
1727 src->s6_addr32[2], src->s6_addr32[3],
Eric Dumazetb44084c2013-10-10 00:04:37 -07001728 inet_rsk(req)->ir_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 dest->s6_addr32[0], dest->s6_addr32[1],
1730 dest->s6_addr32[2], dest->s6_addr32[3],
Eric Dumazet634fb9792013-10-09 15:21:29 -07001731 ntohs(inet_rsk(req)->ir_rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 TCP_SYN_RECV,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001733 0, 0, /* could print option size, but that is af dependent. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001734 1, /* timers active (only the expire timer) */
1735 jiffies_to_clock_t(ttd),
Eric Dumazete6c022a2012-10-27 23:16:46 +00001736 req->num_timeout,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001737 from_kuid_munged(seq_user_ns(seq),
1738 sock_i_uid(req->rsk_listener)),
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001739 0, /* non standard timer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 0, /* open_requests have no inode */
1741 0, req);
1742}
1743
1744static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1745{
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001746 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 __u16 destp, srcp;
1748 int timer_active;
1749 unsigned long timer_expires;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001750 const struct inet_sock *inet = inet_sk(sp);
1751 const struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001752 const struct inet_connection_sock *icsk = inet_csk(sp);
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001753 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001754 int rx_queue;
1755 int state;
Subash Abhinov Kasiviswanathanf65f7a42015-06-05 13:23:01 -06001756 __u8 state_seq = sp->sk_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757
Eric Dumazetefe42082013-10-03 15:42:29 -07001758 dest = &sp->sk_v6_daddr;
1759 src = &sp->sk_v6_rcv_saddr;
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001760 destp = ntohs(inet->inet_dport);
1761 srcp = ntohs(inet->inet_sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001762
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001763 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1764 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
1765 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001767 timer_expires = icsk->icsk_timeout;
1768 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001770 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 } else if (timer_pending(&sp->sk_timer)) {
1772 timer_active = 2;
1773 timer_expires = sp->sk_timer.expires;
1774 } else {
1775 timer_active = 0;
1776 timer_expires = jiffies;
1777 }
1778
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001779 state = sk_state_load(sp);
1780 if (state == TCP_LISTEN)
1781 rx_queue = sp->sk_ack_backlog;
1782 else
1783 /* Because we don't lock the socket,
1784 * we might find a transient negative value.
1785 */
1786 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1787
Subash Abhinov Kasiviswanathanf65f7a42015-06-05 13:23:01 -06001788 if (inet->transparent)
1789 state_seq |= 0x80;
1790
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 seq_printf(seq,
1792 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001793 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 i,
1795 src->s6_addr32[0], src->s6_addr32[1],
1796 src->s6_addr32[2], src->s6_addr32[3], srcp,
1797 dest->s6_addr32[0], dest->s6_addr32[1],
1798 dest->s6_addr32[2], dest->s6_addr32[3], destp,
Subash Abhinov Kasiviswanathanf65f7a42015-06-05 13:23:01 -06001799 state_seq,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001800 tp->write_seq - tp->snd_una,
1801 rx_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 timer_active,
Eric Dumazeta399a802012-08-08 21:13:53 +00001803 jiffies_delta_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001804 icsk->icsk_retransmits,
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06001805 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001806 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 sock_i_ino(sp),
1808 atomic_read(&sp->sk_refcnt), sp,
Stephen Hemminger7be87352008-06-27 20:00:19 -07001809 jiffies_to_clock_t(icsk->icsk_rto),
1810 jiffies_to_clock_t(icsk->icsk_ack.ato),
Weilong Chen4c99aa42013-12-19 18:44:34 +08001811 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07001812 tp->snd_cwnd,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001813 state == TCP_LISTEN ?
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001814 fastopenq->max_qlen :
Yuchung Cheng0a672f72014-05-11 20:22:12 -07001815 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 );
1817}
1818
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001819static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001820 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821{
Eric Dumazet789f5582015-04-12 18:51:09 -07001822 long delta = tw->tw_timer.expires - jiffies;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001823 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 __u16 destp, srcp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825
Eric Dumazetefe42082013-10-03 15:42:29 -07001826 dest = &tw->tw_v6_daddr;
1827 src = &tw->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 destp = ntohs(tw->tw_dport);
1829 srcp = ntohs(tw->tw_sport);
1830
1831 seq_printf(seq,
1832 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Dan Rosenberg71338aa2011-05-23 12:17:35 +00001833 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 i,
1835 src->s6_addr32[0], src->s6_addr32[1],
1836 src->s6_addr32[2], src->s6_addr32[3], srcp,
1837 dest->s6_addr32[0], dest->s6_addr32[1],
1838 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1839 tw->tw_substate, 0, 0,
Eric Dumazeta399a802012-08-08 21:13:53 +00001840 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841 atomic_read(&tw->tw_refcnt), tw);
1842}
1843
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844static int tcp6_seq_show(struct seq_file *seq, void *v)
1845{
1846 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07001847 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
1849 if (v == SEQ_START_TOKEN) {
1850 seq_puts(seq,
1851 " sl "
1852 "local_address "
1853 "remote_address "
1854 "st tx_queue rx_queue tr tm->when retrnsmt"
1855 " uid timeout inode\n");
1856 goto out;
1857 }
1858 st = seq->private;
1859
Eric Dumazet079096f2015-10-02 11:43:32 -07001860 if (sk->sk_state == TCP_TIME_WAIT)
1861 get_timewait6_sock(seq, v, st->num);
1862 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001863 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07001864 else
1865 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866out:
1867 return 0;
1868}
1869
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001870static const struct file_operations tcp6_afinfo_seq_fops = {
1871 .owner = THIS_MODULE,
1872 .open = tcp_seq_open,
1873 .read = seq_read,
1874 .llseek = seq_lseek,
1875 .release = seq_release_net
1876};
1877
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 .name = "tcp6",
1880 .family = AF_INET6,
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001881 .seq_fops = &tcp6_afinfo_seq_fops,
Denis V. Lunev9427c4b2008-04-13 22:12:13 -07001882 .seq_ops = {
1883 .show = tcp6_seq_show,
1884 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885};
1886
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001887int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001889 return tcp_proc_register(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890}
1891
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001892void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001894 tcp_proc_unregister(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895}
1896#endif
1897
1898struct proto tcpv6_prot = {
1899 .name = "TCPv6",
1900 .owner = THIS_MODULE,
1901 .close = tcp_close,
1902 .connect = tcp_v6_connect,
1903 .disconnect = tcp_disconnect,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001904 .accept = inet_csk_accept,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 .ioctl = tcp_ioctl,
1906 .init = tcp_v6_init_sock,
1907 .destroy = tcp_v6_destroy_sock,
1908 .shutdown = tcp_shutdown,
1909 .setsockopt = tcp_setsockopt,
1910 .getsockopt = tcp_getsockopt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 .recvmsg = tcp_recvmsg,
Changli Gao7ba42912010-07-10 20:41:55 +00001912 .sendmsg = tcp_sendmsg,
1913 .sendpage = tcp_sendpage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 .backlog_rcv = tcp_v6_do_rcv,
Eric Dumazet46d3cea2012-07-11 05:50:31 +00001915 .release_cb = tcp_release_cb,
Craig Gallek496611d2016-02-10 11:50:36 -05001916 .hash = inet6_hash,
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001917 .unhash = inet_unhash,
1918 .get_port = inet_csk_get_port,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 .enter_memory_pressure = tcp_enter_memory_pressure,
Eric Dumazetc9bee3b72013-07-22 20:27:07 -07001920 .stream_memory_free = tcp_stream_memory_free,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 .sockets_allocated = &tcp_sockets_allocated,
1922 .memory_allocated = &tcp_memory_allocated,
1923 .memory_pressure = &tcp_memory_pressure,
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001924 .orphan_count = &tcp_orphan_count,
Eric W. Biedermana4fe34b2013-10-19 16:25:36 -07001925 .sysctl_mem = sysctl_tcp_mem,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 .sysctl_wmem = sysctl_tcp_wmem,
1927 .sysctl_rmem = sysctl_tcp_rmem,
1928 .max_header = MAX_TCP_HEADER,
1929 .obj_size = sizeof(struct tcp6_sock),
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08001930 .slab_flags = SLAB_DESTROY_BY_RCU,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08001931 .twsk_prot = &tcp6_timewait_sock_ops,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001932 .rsk_prot = &tcp6_request_sock_ops,
Pavel Emelyanov39d8cda2008-03-22 16:50:58 -07001933 .h.hashinfo = &tcp_hashinfo,
Changli Gao7ba42912010-07-10 20:41:55 +00001934 .no_autobind = true,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001935#ifdef CONFIG_COMPAT
1936 .compat_setsockopt = compat_tcp_setsockopt,
1937 .compat_getsockopt = compat_tcp_getsockopt,
1938#endif
Lorenzo Colittic1e64e22015-12-16 12:30:05 +09001939 .diag_destroy = tcp_abort,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940};
1941
Alexey Dobriyan41135cc2009-09-14 12:22:28 +00001942static const struct inet6_protocol tcpv6_protocol = {
Eric Dumazetc7109982012-07-26 12:18:11 +00001943 .early_demux = tcp_v6_early_demux,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 .handler = tcp_v6_rcv,
1945 .err_handler = tcp_v6_err,
1946 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1947};
1948
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949static struct inet_protosw tcpv6_protosw = {
1950 .type = SOCK_STREAM,
1951 .protocol = IPPROTO_TCP,
1952 .prot = &tcpv6_prot,
1953 .ops = &inet6_stream_ops,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001954 .flags = INET_PROTOSW_PERMANENT |
1955 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956};
1957
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001958static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001959{
Denis V. Lunev56772422008-04-03 14:28:30 -07001960 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1961 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001962}
1963
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001964static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001965{
Denis V. Lunev56772422008-04-03 14:28:30 -07001966 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001967}
1968
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001969static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001970{
1971 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001972}
1973
1974static struct pernet_operations tcpv6_net_ops = {
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001975 .init = tcpv6_net_init,
1976 .exit = tcpv6_net_exit,
1977 .exit_batch = tcpv6_net_exit_batch,
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001978};
1979
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001980int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001982 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08001983
Vlad Yasevich33362882012-11-15 08:49:15 +00001984 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1985 if (ret)
Vlad Yasevichc6b641a2012-11-15 08:49:22 +00001986 goto out;
Vlad Yasevich33362882012-11-15 08:49:15 +00001987
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001988 /* register inet6 protocol */
1989 ret = inet6_register_protosw(&tcpv6_protosw);
1990 if (ret)
1991 goto out_tcpv6_protocol;
1992
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001993 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001994 if (ret)
1995 goto out_tcpv6_protosw;
1996out:
1997 return ret;
1998
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001999out_tcpv6_protosw:
2000 inet6_unregister_protosw(&tcpv6_protosw);
Vlad Yasevich33362882012-11-15 08:49:15 +00002001out_tcpv6_protocol:
2002 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002003 goto out;
2004}
2005
Daniel Lezcano09f77092007-12-13 05:34:58 -08002006void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002007{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002008 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002009 inet6_unregister_protosw(&tcpv6_protosw);
2010 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011}