/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}

static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}
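/* Initiate an active IPv6 TCP connection: validate the destination address,
 * route the flow, choose source address and port, pick the initial sequence
 * number and send the SYN.  V4-mapped destinations are handed over to
 * tcp_v4_connect().
 */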
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

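/* Handle a path MTU reduction: look up the updated route MTU, re-sync the MSS
 * and retransmit segments that no longer fit.
 */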
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

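/* ICMPv6 error handler for TCP: deliver errors (including Packet Too Big and
 * NDISC redirects) to the matching established, request or listening socket.
 */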
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


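/* Build and transmit a SYN-ACK for the given request socket, routing the
 * reply first if the caller did not supply a destination entry.
 */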
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, cmd.tcpm_key,
				      cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, cmd.tcpm_key,
			      cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

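/* Verify the TCP MD5 signature option on an incoming segment against the key
 * (if any) configured for the peer address; returns true if the segment must
 * be dropped.
 */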
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};

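/* Build a reply segment (RST or bare ACK) from an incoming skb and transmit
 * it on the per-namespace TCP control socket; used by tcp_v6_send_reset(),
 * tcp_v6_send_ack() and the time-wait/request-sock ACK paths.
 */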
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;
	__u32 mark = 0;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	if (sk)
		mark = (sk->sk_state == TCP_TIME_WAIT) ?
			inet_twsk(sk)->tw_mark : sk->sk_mark;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup whether or not this is a RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

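/* Send a RST in response to a segment with no (or no usable) socket,
 * honouring any configured TCP MD5 signature key for the peer.
 */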
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif = 0;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, NULL, 0,
					   &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb),
					   tcp_v6_sdif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
			0, 0);
}


static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

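/* Entry point for an incoming SYN: IPv4 packets on a v6-mapped socket are
 * passed to tcp_v4_conn_request(), everything else goes through the generic
 * tcp_conn_request() with the IPv6 request_sock operations.
 */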
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

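/* Create the child socket once the three-way handshake completes; handles
 * both the v6-mapped (IPv4 over an AF_INET6 socket) path and the native
 * IPv6 path.
 */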
Eric Dumazet0c271712015-09-29 07:42:48 -07001058static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001059 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001060 struct dst_entry *dst,
1061 struct request_sock *req_unhash,
1062 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063{
Eric Dumazet634fb9792013-10-09 15:21:29 -07001064 struct inet_request_sock *ireq;
Eric Dumazet0c271712015-09-29 07:42:48 -07001065 struct ipv6_pinfo *newnp;
1066 const struct ipv6_pinfo *np = inet6_sk(sk);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001067 struct ipv6_txoptions *opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068 struct tcp6_sock *newtcp6sk;
1069 struct inet_sock *newinet;
1070 struct tcp_sock *newtp;
1071 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001072#ifdef CONFIG_TCP_MD5SIG
1073 struct tcp_md5sig_key *key;
1074#endif
Neal Cardwell3840a062012-06-28 12:34:19 +00001075 struct flowi6 fl6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076
1077 if (skb->protocol == htons(ETH_P_IP)) {
1078 /*
1079 * v6 mapped
1080 */
1081
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001082 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1083 req_unhash, own_req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084
Ian Morris63159f22015-03-29 14:00:04 +01001085 if (!newsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001086 return NULL;
1087
1088 newtcp6sk = (struct tcp6_sock *)newsk;
1089 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1090
1091 newinet = inet_sk(newsk);
1092 newnp = inet6_sk(newsk);
1093 newtp = tcp_sk(newsk);
1094
1095 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1096
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001097 newnp->saddr = newsk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001099 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001100 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001101#ifdef CONFIG_TCP_MD5SIG
1102 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1103#endif
1104
WANG Cong83eadda2017-05-09 16:59:54 -07001105 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001106 newnp->ipv6_ac_list = NULL;
1107 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108 newnp->pktoptions = NULL;
1109 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001110 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001111 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001112 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001113 if (np->repflow)
1114 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001116 /*
1117 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
 1118	 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
 1119	 * here; tcp_create_openreq_child() now does this for us, see the comment in
1119 * that function for the gory details. -acme
Linus Torvalds1da177e2005-04-16 15:20:36 -07001120 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121
 1122	/* This is a tricky place. Until this moment the IPv4 TCP code
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001123	   worked with the IPv6 icsk.icsk_af_ops.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124 Sync it now.
1125 */
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001126 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127
1128 return newsk;
1129 }
1130
Eric Dumazet634fb9792013-10-09 15:21:29 -07001131 ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132
1133 if (sk_acceptq_is_full(sk))
1134 goto out_overflow;
1135
David S. Miller493f3772010-12-02 12:14:29 -08001136 if (!dst) {
Eric Dumazetf76b33c2015-09-29 07:42:42 -07001137 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
David S. Miller493f3772010-12-02 12:14:29 -08001138 if (!dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139 goto out;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001140 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141
1142 newsk = tcp_create_openreq_child(sk, req, skb);
Ian Morris63159f22015-03-29 14:00:04 +01001143 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001144 goto out_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001146 /*
1147 * No need to charge this sock to the relevant IPv6 refcnt debug socks
 1148	 * count here; tcp_create_openreq_child() now does this for us, see the
1149 * comment in that function for the gory details. -acme
1150 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151
Stephen Hemminger59eed272006-08-25 15:55:43 -07001152 newsk->sk_gso_type = SKB_GSO_TCPV6;
Eric Dumazet6bd4f352015-12-02 21:53:57 -08001153 ip6_dst_store(newsk, dst, NULL, NULL);
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001154 inet6_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155
1156 newtcp6sk = (struct tcp6_sock *)newsk;
1157 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1158
1159 newtp = tcp_sk(newsk);
1160 newinet = inet_sk(newsk);
1161 newnp = inet6_sk(newsk);
1162
1163 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1164
Eric Dumazet634fb9792013-10-09 15:21:29 -07001165 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1166 newnp->saddr = ireq->ir_v6_loc_addr;
1167 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1168 newsk->sk_bound_dev_if = ireq->ir_iif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001170 /* Now IPv6 options...
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171
1172 First: no IPv4 options.
1173 */
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001174 newinet->inet_opt = NULL;
WANG Cong83eadda2017-05-09 16:59:54 -07001175 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001176 newnp->ipv6_ac_list = NULL;
Masayuki Nakagawad35690b2007-03-16 16:14:03 -07001177 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178
1179 /* Clone RX bits */
1180 newnp->rxopt.all = np->rxopt.all;
1181
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182 newnp->pktoptions = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001184 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001185 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001186 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001187 if (np->repflow)
1188 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189
1190 /* Clone native IPv6 options from listening socket (if any)
1191
 1192	   Yes, keeping a reference count would be much more clever,
 1193	   but we also do one more thing here: we reattach optmem
1194 to newsk.
1195 */
Huw Davies56ac42b2016-06-27 15:05:28 -04001196 opt = ireq->ipv6_opt;
1197 if (!opt)
1198 opt = rcu_dereference(np->opt);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001199 if (opt) {
1200 opt = ipv6_dup_options(newsk, opt);
1201 RCU_INIT_POINTER(newnp->opt, opt);
1202 }
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001203 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001204 if (opt)
1205 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1206 opt->opt_flen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207
Daniel Borkmann81164412015-01-05 23:57:48 +01001208 tcp_ca_openreq_child(newsk, dst);
1209
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001211 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Neal Cardwelld135c522012-04-22 09:45:47 +00001212
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213 tcp_initialize_rcv_mss(newsk);
1214
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001215 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1216 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001218#ifdef CONFIG_TCP_MD5SIG
1219 /* Copy over the MD5 key from the original socket */
Wang Yufen4aa956d2014-03-29 09:27:29 +08001220 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
Ian Morris53b24b82015-03-29 14:00:05 +01001221 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001222 /* We're using one, so create a matching key
1223 * on the newsk structure. If we fail to get
1224 * memory, then we end up not copying the key
1225 * across. Shucks.
1226 */
Eric Dumazetefe42082013-10-03 15:42:29 -07001227 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
Ivan Delalande67973182017-06-15 18:07:06 -07001228 AF_INET6, 128, key->key, key->keylen,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001229 sk_gfp_mask(sk, GFP_ATOMIC));
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001230 }
1231#endif
1232
Balazs Scheidler093d2822010-10-21 13:06:43 +02001233 if (__inet_inherit_port(sk, newsk) < 0) {
Christoph Paasche337e242012-12-14 04:07:58 +00001234 inet_csk_prepare_forced_close(newsk);
1235 tcp_done(newsk);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001236 goto out;
1237 }
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001238 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001239 if (*own_req) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001240 tcp_move_syn(newtp, req);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001241
1242 /* Clone pktoptions received with SYN, if we own the req */
1243 if (ireq->pktopts) {
1244 newnp->pktoptions = skb_clone(ireq->pktopts,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001245 sk_gfp_mask(sk, GFP_ATOMIC));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001246 consume_skb(ireq->pktopts);
1247 ireq->pktopts = NULL;
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001248 if (newnp->pktoptions) {
1249 tcp_v6_restore_cb(newnp->pktoptions);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001250 skb_set_owner_r(newnp->pktoptions, newsk);
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001251 }
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001252 }
Eric Dumazetce105002015-10-30 09:46:12 -07001253 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254
1255 return newsk;
1256
1257out_overflow:
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001258 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001259out_nonewsk:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260 dst_release(dst);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001261out:
Eric Dumazet9caad862016-04-01 08:52:20 -07001262 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 return NULL;
1264}
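/* Summary of tcp_v6_syn_recv_sock() (descriptive note): for a v4-mapped
 * request (skb->protocol == ETH_P_IP) the child is built by
 * tcp_v4_syn_recv_sock() and then switched to the ipv6_mapped af_ops, so it
 * runs IPv4 code underneath the IPv6 socket API; for a native IPv6 request a
 * child is created with tcp_create_openreq_child(), the route, IPv6 options
 * and (if configured) the MD5 key are copied over, and the child is inserted
 * into the established hash with inet_ehash_nolisten().
 */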
1265
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266/* The socket must have its spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001267 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268 *
1269 * We have a potential double-lock case here, so even when
1270 * doing backlog processing we use the BH locking scheme.
1271 * This is because we cannot sleep with the original spinlock
1272 * held.
1273 */
1274static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1275{
1276 struct ipv6_pinfo *np = inet6_sk(sk);
1277 struct tcp_sock *tp;
1278 struct sk_buff *opt_skb = NULL;
1279
1280 /* Imagine: socket is IPv6. IPv4 packet arrives,
 1281	   goes to the IPv4 receive handler and is backlogged.
1282 From backlog it always goes here. Kerboom...
1283 Fortunately, tcp_rcv_established and rcv_established
 1284	   handle them correctly, but that is not the case with
1285 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1286 */
1287
1288 if (skb->protocol == htons(ETH_P_IP))
1289 return tcp_v4_do_rcv(sk, skb);
1290
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 /*
1292 * socket locking is here for SMP purposes as backlog rcv
1293 * is currently called with bh processing disabled.
1294 */
1295
1296 /* Do Stevens' IPV6_PKTOPTIONS.
1297
 1298	   Yes, guys, this is the only place in our code where we
 1299	   can handle it without affecting IPv4.
 1300	   The rest of the code is protocol independent,
 1301	   and I do not like the idea of uglifying IPv4.
 1302
 1303	   Actually, the whole idea behind IPV6_PKTOPTIONS
 1304	   does not look very well thought out. For now we latch
 1305	   the options received in the last packet enqueued
 1306	   by TCP. Feel free to propose a better solution.
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001307 --ANK (980728)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308 */
1309 if (np->rxopt.all)
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001310 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311
1312 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet5d299f32012-08-06 05:09:33 +00001313 struct dst_entry *dst = sk->sk_rx_dst;
1314
Tom Herbertbdeab992011-08-14 19:45:55 +00001315 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001316 sk_mark_napi_id(sk, skb);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001317 if (dst) {
1318 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1319 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1320 dst_release(dst);
1321 sk->sk_rx_dst = NULL;
1322 }
1323 }
1324
Yafang Shao3d97d882018-05-29 23:27:31 +08001325 tcp_rcv_established(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 if (opt_skb)
1327 goto ipv6_pktoptions;
1328 return 0;
1329 }
1330
Eric Dumazet12e25e12015-06-03 23:49:21 -07001331 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 goto csum_err;
1333
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001334 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001335 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1336
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 if (!nsk)
1338 goto discard;
1339
Weilong Chen4c99aa42013-12-19 18:44:34 +08001340 if (nsk != sk) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 if (tcp_child_process(sk, nsk, skb))
1342 goto reset;
1343 if (opt_skb)
1344 __kfree_skb(opt_skb);
1345 return 0;
1346 }
Neil Horman47482f12011-04-06 13:07:09 -07001347 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001348 sock_rps_save_rxhash(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001350 if (tcp_rcv_state_process(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 goto reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 if (opt_skb)
1353 goto ipv6_pktoptions;
1354 return 0;
1355
1356reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001357 tcp_v6_send_reset(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358discard:
1359 if (opt_skb)
1360 __kfree_skb(opt_skb);
1361 kfree_skb(skb);
1362 return 0;
1363csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001364 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1365 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 goto discard;
1367
1368
1369ipv6_pktoptions:
 1370	/* You may ask, what is this? We get here when:
 1371
 1372	   1. The skb was enqueued by TCP.
 1373	   2. The skb was added to the tail of the read queue, rather than out of order.
 1374	   3. The socket is not in a passive state.
 1375	   4. Finally, it really contains options that the user wants to receive.
1376 */
1377 tp = tcp_sk(sk);
1378 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1379 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001380 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
Eric Dumazet870c3152014-10-17 09:17:20 -07001381 np->mcast_oif = tcp_v6_iif(opt_skb);
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001382 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001383 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
Florent Fourcot82e9f102013-12-08 15:46:59 +01001384 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
Florent Fourcot1397ed32013-12-08 15:46:57 +01001385 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001386 if (np->repflow)
1387 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
Eric Dumazeta2247722014-09-27 09:50:56 -07001388 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 skb_set_owner_r(opt_skb, sk);
Eric Dumazet8ce48622016-10-12 19:01:45 +02001390 tcp_v6_restore_cb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 opt_skb = xchg(&np->pktoptions, opt_skb);
1392 } else {
1393 __kfree_skb(opt_skb);
1394 opt_skb = xchg(&np->pktoptions, NULL);
1395 }
1396 }
1397
Wei Yongjun800d55f2009-02-23 21:45:33 +00001398 kfree_skb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 return 0;
1400}
1401
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001402static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1403 const struct tcphdr *th)
1404{
 1405	/* This is tricky: we move IP6CB to its correct location inside
1406 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1407 * _decode_session6() uses IP6CB().
 1408	 * barrier() makes sure the compiler won't play aliasing games.
1409 */
1410 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1411 sizeof(struct inet6_skb_parm));
1412 barrier();
1413
1414 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1415 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1416 skb->len - th->doff*4);
1417 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1418 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1419 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1420 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1421 TCP_SKB_CB(skb)->sacked = 0;
Mike Maloney98aaa912017-08-22 17:08:48 -04001422 TCP_SKB_CB(skb)->has_rxtstamp =
1423 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001424}
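/* Worked example for the end_seq arithmetic above (numbers are made up): a
 * segment with a 20-byte TCP header (doff = 5), 100 bytes of payload and the
 * FIN flag set gives
 *
 *	end_seq = seq + th->syn + th->fin + (skb->len - th->doff * 4)
 *	        = seq + 0 + 1 + (120 - 20)
 *	        = seq + 101
 *
 * i.e. SYN and FIN each consume one unit of sequence space on top of the
 * payload bytes.
 */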
1425
Herbert Xue5bbef22007-10-15 12:50:28 -07001426static int tcp_v6_rcv(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427{
David Ahern4297a0e2017-08-07 08:44:21 -07001428 int sdif = inet6_sdif(skb);
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001429 const struct tcphdr *th;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001430 const struct ipv6hdr *hdr;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001431 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 struct sock *sk;
1433 int ret;
Pavel Emelyanova86b1e32008-07-16 20:20:58 -07001434 struct net *net = dev_net(skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435
1436 if (skb->pkt_type != PACKET_HOST)
1437 goto discard_it;
1438
1439 /*
1440 * Count it even if it's bad.
1441 */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001442 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443
1444 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1445 goto discard_it;
1446
Eric Dumazetea1627c2016-05-13 09:16:40 -07001447 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448
Eric Dumazetea1627c2016-05-13 09:16:40 -07001449 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 goto bad_packet;
1451 if (!pskb_may_pull(skb, th->doff*4))
1452 goto discard_it;
1453
Tom Herberte4f45b72014-05-02 16:29:51 -07001454 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001455 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456
Eric Dumazetea1627c2016-05-13 09:16:40 -07001457 th = (const struct tcphdr *)skb->data;
Stephen Hemmingere802af92010-04-22 15:24:53 -07001458 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001460lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001461 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
David Ahern4297a0e2017-08-07 08:44:21 -07001462 th->source, th->dest, inet6_iif(skb), sdif,
Eric Dumazet3b24d852016-04-01 08:52:17 -07001463 &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 if (!sk)
1465 goto no_tcp_socket;
1466
1467process:
1468 if (sk->sk_state == TCP_TIME_WAIT)
1469 goto do_time_wait;
1470
Eric Dumazet079096f2015-10-02 11:43:32 -07001471 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1472 struct request_sock *req = inet_reqsk(sk);
Eric Dumazete0f97592018-02-13 06:14:12 -08001473 bool req_stolen = false;
Eric Dumazet77166822016-02-18 05:39:18 -08001474 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001475
1476 sk = req->rsk_listener;
Eric Dumazet079096f2015-10-02 11:43:32 -07001477 if (tcp_v6_inbound_md5_hash(sk, skb)) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001478 sk_drops_add(sk, skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001479 reqsk_put(req);
1480 goto discard_it;
1481 }
Frank van der Linden4fd44a92018-06-12 23:09:37 +00001482 if (tcp_checksum_complete(skb)) {
1483 reqsk_put(req);
1484 goto csum_error;
1485 }
Eric Dumazet77166822016-02-18 05:39:18 -08001486 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001487 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001488 goto lookup;
1489 }
Eric Dumazet77166822016-02-18 05:39:18 -08001490 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001491 refcounted = true;
Eric Dumazet1f3b3592017-09-08 12:44:47 -07001492 nsk = NULL;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001493 if (!tcp_filter(sk, skb)) {
1494 th = (const struct tcphdr *)skb->data;
1495 hdr = ipv6_hdr(skb);
1496 tcp_v6_fill_cb(skb, hdr, th);
Eric Dumazete0f97592018-02-13 06:14:12 -08001497 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001498 }
Eric Dumazet079096f2015-10-02 11:43:32 -07001499 if (!nsk) {
1500 reqsk_put(req);
Eric Dumazete0f97592018-02-13 06:14:12 -08001501 if (req_stolen) {
1502 /* Another cpu got exclusive access to req
1503 * and created a full blown socket.
1504 * Try to feed this packet to this socket
1505 * instead of discarding it.
1506 */
1507 tcp_v6_restore_cb(skb);
1508 sock_put(sk);
1509 goto lookup;
1510 }
Eric Dumazet77166822016-02-18 05:39:18 -08001511 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001512 }
1513 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001514 reqsk_put(req);
1515 tcp_v6_restore_cb(skb);
1516 } else if (tcp_child_process(sk, nsk, skb)) {
1517 tcp_v6_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001518 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001519 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001520 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001521 return 0;
1522 }
1523 }
Stephen Hemmingere802af92010-04-22 15:24:53 -07001524 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001525 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingere802af92010-04-22 15:24:53 -07001526 goto discard_and_relse;
1527 }
1528
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1530 goto discard_and_relse;
1531
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001532 if (tcp_v6_inbound_md5_hash(sk, skb))
1533 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001534
Eric Dumazetac6e7802016-11-10 13:12:35 -08001535 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001537 th = (const struct tcphdr *)skb->data;
1538 hdr = ipv6_hdr(skb);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001539 tcp_v6_fill_cb(skb, hdr, th);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
1541 skb->dev = NULL;
1542
Eric Dumazete994b2f2015-10-02 11:43:39 -07001543 if (sk->sk_state == TCP_LISTEN) {
1544 ret = tcp_v6_do_rcv(sk, skb);
1545 goto put_and_return;
1546 }
1547
1548 sk_incoming_cpu_update(sk);
1549
Fabio Olive Leite293b9c42006-09-25 22:28:47 -07001550 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001551 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 ret = 0;
1553 if (!sock_owned_by_user(sk)) {
Florian Westphale7942d02017-07-30 03:57:18 +02001554 ret = tcp_v6_do_rcv(sk, skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001555 } else if (tcp_add_backlog(sk, skb)) {
Zhu Yi6b03a532010-03-04 18:01:41 +00001556 goto discard_and_relse;
1557 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 bh_unlock_sock(sk);
1559
Eric Dumazete994b2f2015-10-02 11:43:39 -07001560put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001561 if (refcounted)
1562 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563 return ret ? -1 : 0;
1564
1565no_tcp_socket:
1566 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1567 goto discard_it;
1568
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001569 tcp_v6_fill_cb(skb, hdr, th);
1570
Eric Dumazet12e25e12015-06-03 23:49:21 -07001571 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001572csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001573 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001575 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001577 tcp_v6_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 }
1579
1580discard_it:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 kfree_skb(skb);
1582 return 0;
1583
1584discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001585 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001586 if (refcounted)
1587 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 goto discard_it;
1589
1590do_time_wait:
1591 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001592 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 goto discard_it;
1594 }
1595
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001596 tcp_v6_fill_cb(skb, hdr, th);
1597
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001598 if (tcp_checksum_complete(skb)) {
1599 inet_twsk_put(inet_twsk(sk));
1600 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 }
1602
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001603 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 case TCP_TW_SYN:
1605 {
1606 struct sock *sk2;
1607
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001608 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
Craig Galleka5836362016-02-10 11:50:38 -05001609 skb, __tcp_hdrlen(th),
Tom Herbert5ba24952013-01-22 09:50:39 +00001610 &ipv6_hdr(skb)->saddr, th->source,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001611 &ipv6_hdr(skb)->daddr,
David Ahern4297a0e2017-08-07 08:44:21 -07001612 ntohs(th->dest), tcp_v6_iif(skb),
1613 sdif);
Ian Morris53b24b82015-03-29 14:00:05 +01001614 if (sk2) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07001615 struct inet_timewait_sock *tw = inet_twsk(sk);
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001616 inet_twsk_deschedule_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 sk = sk2;
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001618 tcp_v6_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001619 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 goto process;
1621 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 }
Gustavo A. R. Silva275757e62017-10-16 16:36:52 -05001623 /* to ACK */
1624 /* fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 case TCP_TW_ACK:
1626 tcp_v6_timewait_ack(sk, skb);
1627 break;
1628 case TCP_TW_RST:
Florian Westphal271c3b92015-12-21 21:29:26 +01001629 tcp_v6_send_reset(sk, skb);
1630 inet_twsk_deschedule_put(inet_twsk(sk));
1631 goto discard_it;
Wang Yufen4aa956d2014-03-29 09:27:29 +08001632 case TCP_TW_SUCCESS:
1633 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 }
1635 goto discard_it;
1636}
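/* Receive-path summary (descriptive note): tcp_v6_rcv() dispatches on the
 * state of the looked-up socket. TCP_TIME_WAIT goes to do_time_wait,
 * TCP_NEW_SYN_RECV revalidates the request against its listener via
 * tcp_check_req(), TCP_LISTEN is fed straight to tcp_v6_do_rcv() without
 * taking the socket spinlock (lockless listeners), and established sockets
 * are either processed immediately or queued to the backlog when the socket
 * is currently owned by the user.
 */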
1637
Eric Dumazetc7109982012-07-26 12:18:11 +00001638static void tcp_v6_early_demux(struct sk_buff *skb)
1639{
1640 const struct ipv6hdr *hdr;
1641 const struct tcphdr *th;
1642 struct sock *sk;
1643
1644 if (skb->pkt_type != PACKET_HOST)
1645 return;
1646
1647 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1648 return;
1649
1650 hdr = ipv6_hdr(skb);
1651 th = tcp_hdr(skb);
1652
1653 if (th->doff < sizeof(struct tcphdr) / 4)
1654 return;
1655
Eric Dumazet870c3152014-10-17 09:17:20 -07001656 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
Eric Dumazetc7109982012-07-26 12:18:11 +00001657 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1658 &hdr->saddr, th->source,
1659 &hdr->daddr, ntohs(th->dest),
David Ahern4297a0e2017-08-07 08:44:21 -07001660 inet6_iif(skb), inet6_sdif(skb));
Eric Dumazetc7109982012-07-26 12:18:11 +00001661 if (sk) {
1662 skb->sk = sk;
1663 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001664 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001665 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Neal Cardwellf3f12132012-10-22 21:41:48 +00001666
Eric Dumazetc7109982012-07-26 12:18:11 +00001667 if (dst)
Eric Dumazet5d299f32012-08-06 05:09:33 +00001668 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
Eric Dumazetc7109982012-07-26 12:18:11 +00001669 if (dst &&
Neal Cardwellf3f12132012-10-22 21:41:48 +00001670 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
Eric Dumazetc7109982012-07-26 12:18:11 +00001671 skb_dst_set_noref(skb, dst);
1672 }
1673 }
1674}
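/* Descriptive note: early demux runs from the IPv6 input path before the
 * regular socket lookup. By finding an established socket here, attaching it
 * to the skb and setting the cached rx dst, the later hash lookup in
 * tcp_v6_rcv() and the routing decision for this packet can be skipped in
 * the common case of back-to-back segments on an established connection.
 */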
1675
David S. Millerccb7c412010-12-01 18:09:13 -08001676static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1677 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1678 .twsk_unique = tcp_twsk_unique,
Wang Yufen4aa956d2014-03-29 09:27:29 +08001679 .twsk_destructor = tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001680};
1681
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001682static const struct inet_connection_sock_af_ops ipv6_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001683 .queue_xmit = inet6_csk_xmit,
1684 .send_check = tcp_v6_send_check,
1685 .rebuild_header = inet6_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001686 .sk_rx_dst_set = inet6_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001687 .conn_request = tcp_v6_conn_request,
1688 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001689 .net_header_len = sizeof(struct ipv6hdr),
Eric Dumazet67469602012-04-24 07:37:38 +00001690 .net_frag_header_len = sizeof(struct frag_hdr),
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001691 .setsockopt = ipv6_setsockopt,
1692 .getsockopt = ipv6_getsockopt,
1693 .addr2sockaddr = inet6_csk_addr2sockaddr,
1694 .sockaddr_len = sizeof(struct sockaddr_in6),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001695#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001696 .compat_setsockopt = compat_ipv6_setsockopt,
1697 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001698#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001699 .mtu_reduced = tcp_v6_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700};
1701
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001702#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001703static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001704 .md5_lookup = tcp_v6_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001705 .calc_md5_hash = tcp_v6_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001706 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001707};
David S. Millera9286302006-11-14 19:53:22 -08001708#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001709
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710/*
1711 * TCP over IPv4 via INET6 API
1712 */
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001713static const struct inet_connection_sock_af_ops ipv6_mapped = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001714 .queue_xmit = ip_queue_xmit,
1715 .send_check = tcp_v4_send_check,
1716 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet63d02d12012-08-09 14:11:00 +00001717 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001718 .conn_request = tcp_v6_conn_request,
1719 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001720 .net_header_len = sizeof(struct iphdr),
1721 .setsockopt = ipv6_setsockopt,
1722 .getsockopt = ipv6_getsockopt,
1723 .addr2sockaddr = inet6_csk_addr2sockaddr,
1724 .sockaddr_len = sizeof(struct sockaddr_in6),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001725#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001726 .compat_setsockopt = compat_ipv6_setsockopt,
1727 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001728#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001729 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730};
1731
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001732#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001733static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001734 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001735 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001736 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001737};
David S. Millera9286302006-11-14 19:53:22 -08001738#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001739
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740/* NOTE: A lot of things are set to zero explicitly by the call to
 1741	 * sk_alloc(), so they need not be done here.
1742 */
1743static int tcp_v6_init_sock(struct sock *sk)
1744{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001745 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746
Neal Cardwell900f65d2012-04-19 09:55:21 +00001747 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001749 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001751#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001752 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001753#endif
1754
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 return 0;
1756}
1757
Brian Haley7d06b2e2008-06-14 17:04:49 -07001758static void tcp_v6_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 tcp_v4_destroy_sock(sk);
Brian Haley7d06b2e2008-06-14 17:04:49 -07001761 inet6_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762}
1763
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001764#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765/* Proc filesystem TCPv6 sock list dumping. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001766static void get_openreq6(struct seq_file *seq,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001767 const struct request_sock *req, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768{
Eric Dumazetfa76ce732015-03-19 19:04:20 -07001769 long ttd = req->rsk_timer.expires - jiffies;
Eric Dumazet634fb9792013-10-09 15:21:29 -07001770 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1771 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772
1773 if (ttd < 0)
1774 ttd = 0;
1775
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 seq_printf(seq,
1777 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001778 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 i,
1780 src->s6_addr32[0], src->s6_addr32[1],
1781 src->s6_addr32[2], src->s6_addr32[3],
Eric Dumazetb44084c2013-10-10 00:04:37 -07001782 inet_rsk(req)->ir_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 dest->s6_addr32[0], dest->s6_addr32[1],
1784 dest->s6_addr32[2], dest->s6_addr32[3],
Eric Dumazet634fb9792013-10-09 15:21:29 -07001785 ntohs(inet_rsk(req)->ir_rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 TCP_SYN_RECV,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001787 0, 0, /* could print option size, but that is af dependent. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001788 1, /* timers active (only the expire timer) */
1789 jiffies_to_clock_t(ttd),
Eric Dumazete6c022a2012-10-27 23:16:46 +00001790 req->num_timeout,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001791 from_kuid_munged(seq_user_ns(seq),
1792 sock_i_uid(req->rsk_listener)),
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001793 0, /* non standard timer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 0, /* open_requests have no inode */
1795 0, req);
1796}
1797
1798static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1799{
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001800 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 __u16 destp, srcp;
1802 int timer_active;
1803 unsigned long timer_expires;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001804 const struct inet_sock *inet = inet_sk(sp);
1805 const struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001806 const struct inet_connection_sock *icsk = inet_csk(sp);
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001807 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001808 int rx_queue;
1809 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810
Eric Dumazetefe42082013-10-03 15:42:29 -07001811 dest = &sp->sk_v6_daddr;
1812 src = &sp->sk_v6_rcv_saddr;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001813 destp = ntohs(inet->inet_dport);
1814 srcp = ntohs(inet->inet_sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001815
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001816 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
Yuchung Cheng57dde7f2017-01-12 22:11:33 -08001817 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001818 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001820 timer_expires = icsk->icsk_timeout;
1821 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001823 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 } else if (timer_pending(&sp->sk_timer)) {
1825 timer_active = 2;
1826 timer_expires = sp->sk_timer.expires;
1827 } else {
1828 timer_active = 0;
1829 timer_expires = jiffies;
1830 }
1831
Yafang Shao986ffdf2017-12-20 11:12:52 +08001832 state = inet_sk_state_load(sp);
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001833 if (state == TCP_LISTEN)
1834 rx_queue = sp->sk_ack_backlog;
1835 else
1836 /* Because we don't lock the socket,
1837 * we might find a transient negative value.
1838 */
1839 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1840
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841 seq_printf(seq,
1842 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001843 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 i,
1845 src->s6_addr32[0], src->s6_addr32[1],
1846 src->s6_addr32[2], src->s6_addr32[3], srcp,
1847 dest->s6_addr32[0], dest->s6_addr32[1],
1848 dest->s6_addr32[2], dest->s6_addr32[3], destp,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001849 state,
1850 tp->write_seq - tp->snd_una,
1851 rx_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 timer_active,
Eric Dumazeta399a802012-08-08 21:13:53 +00001853 jiffies_delta_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001854 icsk->icsk_retransmits,
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06001855 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001856 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 sock_i_ino(sp),
Reshetova, Elena41c6d652017-06-30 13:08:01 +03001858 refcount_read(&sp->sk_refcnt), sp,
Stephen Hemminger7be87352008-06-27 20:00:19 -07001859 jiffies_to_clock_t(icsk->icsk_rto),
1860 jiffies_to_clock_t(icsk->icsk_ack.ato),
Weilong Chen4c99aa42013-12-19 18:44:34 +08001861 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07001862 tp->snd_cwnd,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001863 state == TCP_LISTEN ?
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001864 fastopenq->max_qlen :
Yuchung Cheng0a672f72014-05-11 20:22:12 -07001865 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 );
1867}
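/* Field note for the format string above (descriptive): after the address,
 * port and state columns come tx_queue (write_seq - snd_una), rx_queue, the
 * timer type and its expiry in clock ticks, the retransmit count, uid,
 * probes_out, inode, refcount, the socket pointer, icsk_rto and
 * icsk_ack.ato in clock ticks, the quick-ack/pingpong bits, snd_cwnd, and
 * finally either the listener's fastopen max_qlen or the slow-start
 * threshold (-1 while still in initial slow start).
 */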
1868
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001869static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001870 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871{
Eric Dumazet789f5582015-04-12 18:51:09 -07001872 long delta = tw->tw_timer.expires - jiffies;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001873 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 __u16 destp, srcp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875
Eric Dumazetefe42082013-10-03 15:42:29 -07001876 dest = &tw->tw_v6_daddr;
1877 src = &tw->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 destp = ntohs(tw->tw_dport);
1879 srcp = ntohs(tw->tw_sport);
1880
1881 seq_printf(seq,
1882 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Dan Rosenberg71338aa2011-05-23 12:17:35 +00001883 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 i,
1885 src->s6_addr32[0], src->s6_addr32[1],
1886 src->s6_addr32[2], src->s6_addr32[3], srcp,
1887 dest->s6_addr32[0], dest->s6_addr32[1],
1888 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1889 tw->tw_substate, 0, 0,
Eric Dumazeta399a802012-08-08 21:13:53 +00001890 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
Reshetova, Elena41c6d652017-06-30 13:08:01 +03001891 refcount_read(&tw->tw_refcnt), tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892}
1893
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894static int tcp6_seq_show(struct seq_file *seq, void *v)
1895{
1896 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07001897 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898
1899 if (v == SEQ_START_TOKEN) {
1900 seq_puts(seq,
1901 " sl "
1902 "local_address "
1903 "remote_address "
1904 "st tx_queue rx_queue tr tm->when retrnsmt"
1905 " uid timeout inode\n");
1906 goto out;
1907 }
1908 st = seq->private;
1909
Eric Dumazet079096f2015-10-02 11:43:32 -07001910 if (sk->sk_state == TCP_TIME_WAIT)
1911 get_timewait6_sock(seq, v, st->num);
1912 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001913 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07001914 else
1915 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916out:
1917 return 0;
1918}
1919
Christoph Hellwig37d849b2018-04-11 09:31:28 +02001920static const struct seq_operations tcp6_seq_ops = {
1921 .show = tcp6_seq_show,
1922 .start = tcp_seq_start,
1923 .next = tcp_seq_next,
1924 .stop = tcp_seq_stop,
1925};
1926
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 .family = AF_INET6,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929};
1930
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001931int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932{
Christoph Hellwigc3506372018-04-10 19:42:55 +02001933 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
1934 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
Christoph Hellwig37d849b2018-04-11 09:31:28 +02001935 return -ENOMEM;
1936 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937}
1938
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001939void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02001941 remove_proc_entry("tcp6", net->proc_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942}
1943#endif
1944
1945struct proto tcpv6_prot = {
1946 .name = "TCPv6",
1947 .owner = THIS_MODULE,
1948 .close = tcp_close,
Andrey Ignatovd74bad42018-03-30 15:08:05 -07001949 .pre_connect = tcp_v6_pre_connect,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 .connect = tcp_v6_connect,
1951 .disconnect = tcp_disconnect,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001952 .accept = inet_csk_accept,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 .ioctl = tcp_ioctl,
1954 .init = tcp_v6_init_sock,
1955 .destroy = tcp_v6_destroy_sock,
1956 .shutdown = tcp_shutdown,
1957 .setsockopt = tcp_setsockopt,
1958 .getsockopt = tcp_getsockopt,
Ursula Braun4b9d07a2017-01-09 16:55:12 +01001959 .keepalive = tcp_set_keepalive,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 .recvmsg = tcp_recvmsg,
Changli Gao7ba42912010-07-10 20:41:55 +00001961 .sendmsg = tcp_sendmsg,
1962 .sendpage = tcp_sendpage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 .backlog_rcv = tcp_v6_do_rcv,
Eric Dumazet46d3cea2012-07-11 05:50:31 +00001964 .release_cb = tcp_release_cb,
Craig Gallek496611d2016-02-10 11:50:36 -05001965 .hash = inet6_hash,
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001966 .unhash = inet_unhash,
1967 .get_port = inet_csk_get_port,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 .enter_memory_pressure = tcp_enter_memory_pressure,
Eric Dumazet06044752017-06-07 13:29:12 -07001969 .leave_memory_pressure = tcp_leave_memory_pressure,
Eric Dumazetc9bee3b72013-07-22 20:27:07 -07001970 .stream_memory_free = tcp_stream_memory_free,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 .sockets_allocated = &tcp_sockets_allocated,
1972 .memory_allocated = &tcp_memory_allocated,
1973 .memory_pressure = &tcp_memory_pressure,
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001974 .orphan_count = &tcp_orphan_count,
Eric W. Biedermana4fe34b2013-10-19 16:25:36 -07001975 .sysctl_mem = sysctl_tcp_mem,
Eric Dumazet356d1832017-11-07 00:29:28 -08001976 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
1977 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 .max_header = MAX_TCP_HEADER,
1979 .obj_size = sizeof(struct tcp6_sock),
Paul E. McKenney5f0d5a32017-01-18 02:53:44 -08001980 .slab_flags = SLAB_TYPESAFE_BY_RCU,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08001981 .twsk_prot = &tcp6_timewait_sock_ops,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001982 .rsk_prot = &tcp6_request_sock_ops,
Pavel Emelyanov39d8cda2008-03-22 16:50:58 -07001983 .h.hashinfo = &tcp_hashinfo,
Changli Gao7ba42912010-07-10 20:41:55 +00001984 .no_autobind = true,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001985#ifdef CONFIG_COMPAT
1986 .compat_setsockopt = compat_tcp_setsockopt,
1987 .compat_getsockopt = compat_tcp_getsockopt,
1988#endif
Lorenzo Colittic1e64e22015-12-16 12:30:05 +09001989 .diag_destroy = tcp_abort,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990};
1991
David Aherna8e3bb32017-08-28 15:14:20 -07001992/* Thinking of making this const? Don't.
1993 * early_demux can change based on sysctl.
1994 */
Julia Lawall39294c32017-08-01 18:27:28 +02001995static struct inet6_protocol tcpv6_protocol = {
Eric Dumazetc7109982012-07-26 12:18:11 +00001996 .early_demux = tcp_v6_early_demux,
subashab@codeaurora.orgdddb64b2017-03-23 13:34:16 -06001997 .early_demux_handler = tcp_v6_early_demux,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 .handler = tcp_v6_rcv,
1999 .err_handler = tcp_v6_err,
2000 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2001};
2002
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003static struct inet_protosw tcpv6_protosw = {
2004 .type = SOCK_STREAM,
2005 .protocol = IPPROTO_TCP,
2006 .prot = &tcpv6_prot,
2007 .ops = &inet6_stream_ops,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08002008 .flags = INET_PROTOSW_PERMANENT |
2009 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010};
2011
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002012static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002013{
Denis V. Lunev56772422008-04-03 14:28:30 -07002014 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2015 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002016}
2017
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002018static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002019{
Denis V. Lunev56772422008-04-03 14:28:30 -07002020 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002021}
2022
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002023static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002024{
Haishuang Yan1946e672016-12-28 17:52:32 +08002025 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002026}
2027
2028static struct pernet_operations tcpv6_net_ops = {
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002029 .init = tcpv6_net_init,
2030 .exit = tcpv6_net_exit,
2031 .exit_batch = tcpv6_net_exit_batch,
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002032};
2033
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002034int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002036 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08002037
Vlad Yasevich33362882012-11-15 08:49:15 +00002038 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2039 if (ret)
Vlad Yasevichc6b641a2012-11-15 08:49:22 +00002040 goto out;
Vlad Yasevich33362882012-11-15 08:49:15 +00002041
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002042 /* register inet6 protocol */
2043 ret = inet6_register_protosw(&tcpv6_protosw);
2044 if (ret)
2045 goto out_tcpv6_protocol;
2046
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002047 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002048 if (ret)
2049 goto out_tcpv6_protosw;
2050out:
2051 return ret;
2052
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002053out_tcpv6_protosw:
2054 inet6_unregister_protosw(&tcpv6_protosw);
Vlad Yasevich33362882012-11-15 08:49:15 +00002055out_tcpv6_protocol:
2056 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002057 goto out;
2058}
2059
Daniel Lezcano09f77092007-12-13 05:34:58 -08002060void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002061{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002062 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002063 inet6_unregister_protosw(&tcpv6_protosw);
2064 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065}