/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void __tcp_v6_send_check(struct sk_buff *skb,
				struct in6_addr *saddr,
				struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

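/*
 * Active open for an IPv6 (or v4-mapped) TCP socket: route the flow,
 * pick the source address and port, then hand off to tcp_connect() to
 * send the SYN.  V4-mapped destinations are redirected to the IPv4 path.
 */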
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &np->daddr);
	ipv6_addr_copy(&fl6.saddr,
		       (saddr ? saddr : &np->saddr));
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying a new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

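/*
 * ICMPv6 error handler: locate the socket for the offending segment,
 * handle PKT_TOOBIG by refreshing the cached route and syncing the MSS,
 * and report other errors to the socket or to a pending request_sock.
 */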
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle the rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl6.daddr, &np->daddr);
			ipv6_addr_copy(&fl6.saddr, &np->saddr);
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

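/*
 * Build and transmit a SYN-ACK for a pending request_sock, routing it
 * with the request's addresses and the listener's IPv6 options.
 */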
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl6, opt);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

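/*
 * TCP MD5 signature (RFC 2385) support: per-socket key lookup and
 * management for IPv6 peers, plus hashing of the pseudo-header and
 * segment data used when signing and validating segments.
 */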
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

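/*
 * Checksum and offload helpers: fill in the TCP checksum over the IPv6
 * pseudo-header, either fully in software or partially for hardware
 * checksum/GSO, plus the GRO receive/complete hooks.
 */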
static void __tcp_v6_send_check(struct sk_buff *skb,
				struct in6_addr *saddr, struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

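/*
 * Build and send a bare ACK or RST on behalf of the per-netns control
 * socket, optionally carrying timestamp and MD5 signature options.
 */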
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even when it is for a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}


static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

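/*
 * Passive open: validate an incoming SYN, allocate a request_sock,
 * parse the TCP options (falling back to syncookies when the request
 * queue overflows), and answer with a SYN-ACK.
 */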
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167/* FIXME: this is substantially similar to the ipv4 code.
1168 * Can some kind of merge be done? -- erics
1169 */
1170static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1171{
William Allen Simpson4957faade2009-12-02 18:25:27 +00001172 struct tcp_extend_values tmp_ext;
William Allen Simpsone6b4d112009-12-02 18:07:39 +00001173 struct tcp_options_received tmp_opt;
William Allen Simpson4957faade2009-12-02 18:25:27 +00001174 u8 *hash_location;
William Allen Simpsone6b4d112009-12-02 18:07:39 +00001175 struct request_sock *req;
Arnaldo Carvalho de Meloca304b62005-12-13 23:15:40 -08001176 struct inet6_request_sock *treq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177 struct ipv6_pinfo *np = inet6_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 struct tcp_sock *tp = tcp_sk(sk);
William Allen Simpsone6b4d112009-12-02 18:07:39 +00001179 __u32 isn = TCP_SKB_CB(skb)->when;
David S. Miller493f3772010-12-02 12:14:29 -08001180 struct dst_entry *dst = NULL;
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001181#ifdef CONFIG_SYN_COOKIES
1182 int want_cookie = 0;
1183#else
1184#define want_cookie 0
1185#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186
1187 if (skb->protocol == htons(ETH_P_IP))
1188 return tcp_v4_conn_request(sk, skb);
1189
1190 if (!ipv6_unicast_destination(skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001191 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001193 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194 if (net_ratelimit())
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001195 syn_flood_warning(skb);
1196#ifdef CONFIG_SYN_COOKIES
1197 if (sysctl_tcp_syncookies)
1198 want_cookie = 1;
1199 else
1200#endif
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001201 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 }
1203
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001204 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 goto drop;
1206
Arnaldo Carvalho de Meloca304b62005-12-13 23:15:40 -08001207 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 if (req == NULL)
1209 goto drop;
1210
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001211#ifdef CONFIG_TCP_MD5SIG
1212 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1213#endif
1214
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 tcp_clear_options(&tmp_opt);
1216 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1217 tmp_opt.user_mss = tp->rx_opt.user_mss;
David S. Millerbb5b7c12009-12-15 20:56:42 -08001218 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219
William Allen Simpson4957faade2009-12-02 18:25:27 +00001220 if (tmp_opt.cookie_plus > 0 &&
1221 tmp_opt.saw_tstamp &&
1222 !tp->rx_opt.cookie_out_never &&
1223 (sysctl_tcp_cookie_size > 0 ||
1224 (tp->cookie_values != NULL &&
1225 tp->cookie_values->cookie_desired > 0))) {
1226 u8 *c;
1227 u32 *d;
1228 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1229 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1230
1231 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1232 goto drop_and_free;
1233
1234 /* Secret recipe starts with IP addresses */
Eric Dumazet0eae88f2010-04-20 19:06:52 -07001235 d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
William Allen Simpson4957faade2009-12-02 18:25:27 +00001236 *mess++ ^= *d++;
1237 *mess++ ^= *d++;
1238 *mess++ ^= *d++;
1239 *mess++ ^= *d++;
Eric Dumazet0eae88f2010-04-20 19:06:52 -07001240 d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
William Allen Simpson4957faade2009-12-02 18:25:27 +00001241 *mess++ ^= *d++;
1242 *mess++ ^= *d++;
1243 *mess++ ^= *d++;
1244 *mess++ ^= *d++;
1245
1246 /* plus variable length Initiator Cookie */
1247 c = (u8 *)mess;
1248 while (l-- > 0)
1249 *c++ ^= *hash_location++;
1250
1251#ifdef CONFIG_SYN_COOKIES
1252 want_cookie = 0; /* not our kind of cookie */
1253#endif
1254 tmp_ext.cookie_out_never = 0; /* false */
1255 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1256 } else if (!tp->rx_opt.cookie_in_always) {
1257 /* redundant indications, but ensure initialization. */
1258 tmp_ext.cookie_out_never = 1; /* true */
1259 tmp_ext.cookie_plus = 0;
1260 } else {
1261 goto drop_and_free;
1262 }
1263 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264
Florian Westphal4dfc2812008-04-10 03:12:40 -07001265 if (want_cookie && !tmp_opt.saw_tstamp)
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001266 tcp_clear_options(&tmp_opt);
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001267
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1269 tcp_openreq_init(req, &tmp_opt, skb);
1270
Arnaldo Carvalho de Meloca304b62005-12-13 23:15:40 -08001271 treq = inet6_rsk(req);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001272 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1273 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
Florian Westphal172d69e2010-06-21 11:48:45 +00001274 if (!want_cookie || tmp_opt.tstamp_ok)
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001275 TCP_ECN_create_request(req, tcp_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276
Florian Westphal2bbdf382010-06-13 11:29:39 +00001277 if (!isn) {
David S. Miller493f3772010-12-02 12:14:29 -08001278 struct inet_peer *peer = NULL;
1279
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001280 if (ipv6_opt_accepted(sk, skb) ||
1281 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1282 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1283 atomic_inc(&skb->users);
1284 treq->pktopts = skb;
1285 }
1286 treq->iif = sk->sk_bound_dev_if;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001288 /* So that link locals have meaning */
1289 if (!sk->sk_bound_dev_if &&
1290 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1291 treq->iif = inet6_iif(skb);
David S. Miller493f3772010-12-02 12:14:29 -08001292
1293 if (want_cookie) {
Florian Westphal2bbdf382010-06-13 11:29:39 +00001294 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1295 req->cookie_ts = tmp_opt.tstamp_ok;
David S. Miller493f3772010-12-02 12:14:29 -08001296 goto have_isn;
Florian Westphal2bbdf382010-06-13 11:29:39 +00001297 }
David S. Miller493f3772010-12-02 12:14:29 -08001298
1299 /* VJ's idea. We save the last timestamp seen
1300 * from the destination in the peer table when entering
1301 * TIME-WAIT state, and check against it before
1302 * accepting a new connection request.
1303 *
1304 * If "isn" is not zero, this request hit an alive
1305 * timewait bucket, so all the necessary checks
1306 * are made in the function processing the timewait state.
1307 */
1308 if (tmp_opt.saw_tstamp &&
1309 tcp_death_row.sysctl_tw_recycle &&
1310 (dst = inet6_csk_route_req(sk, req)) != NULL &&
1311 (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
David S. Miller7a71ed82011-02-09 14:30:26 -08001312 ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
David S. Miller493f3772010-12-02 12:14:29 -08001313 &treq->rmt_addr)) {
1314 inet_peer_refcheck(peer);
1315 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1316 (s32)(peer->tcp_ts - req->ts_recent) >
1317 TCP_PAWS_WINDOW) {
1318 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1319 goto drop_and_release;
1320 }
1321 }
1322 /* Kill the following clause, if you dislike this way. */
1323 else if (!sysctl_tcp_syncookies &&
1324 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1325 (sysctl_max_syn_backlog >> 2)) &&
1326 (!peer || !peer->tcp_ts_stamp) &&
1327 (!dst || !dst_metric(dst, RTAX_RTT))) {
1328 /* Without syncookies the last quarter of the
1329 * backlog is filled with destinations
1330 * proven to be alive.
1331 * It means that we continue to communicate
1332 * with destinations already remembered
1333 * at the moment of the synflood.
1334 */
1335 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1336 &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1337 goto drop_and_release;
1338 }
1339
1340 isn = tcp_v6_init_sequence(skb);
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001341 }
David S. Miller493f3772010-12-02 12:14:29 -08001342have_isn:
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001343 tcp_rsk(req)->snt_isn = isn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344
Venkat Yekkirala4237c752006-07-24 23:32:50 -07001345 security_inet_conn_request(sk, skb, req);
1346
William Allen Simpson4957faade2009-12-02 18:25:27 +00001347 if (tcp_v6_send_synack(sk, req,
1348 (struct request_values *)&tmp_ext) ||
1349 want_cookie)
William Allen Simpsone6b4d112009-12-02 18:07:39 +00001350 goto drop_and_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351
William Allen Simpsone6b4d112009-12-02 18:07:39 +00001352 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1353 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354
David S. Miller493f3772010-12-02 12:14:29 -08001355drop_and_release:
1356 dst_release(dst);
William Allen Simpsone6b4d112009-12-02 18:07:39 +00001357drop_and_free:
1358 reqsk_free(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359drop:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 return 0; /* don't send reset */
1361}
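/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * the cookie "secret recipe" in tcp_v6_conn_request() above XORs the 32-bit
 * words of the destination and then the source IPv6 address into the
 * message buffer before it is hashed.  The minimal userspace program below
 * only models that mixing step; the digest itself (tcp_cookie_generator())
 * and the option-derived Initiator Cookie bytes are deliberately left out.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr daddr, saddr;
	uint32_t mess[8] = { 0 };	/* stands in for the cookie_bakery tail */
	uint32_t d[4], s[4];
	int i;

	inet_pton(AF_INET6, "2001:db8::1", &daddr);
	inet_pton(AF_INET6, "2001:db8::2", &saddr);
	memcpy(d, &daddr, sizeof(d));
	memcpy(s, &saddr, sizeof(s));

	/* Secret recipe starts with IP addresses: daddr words, then saddr */
	for (i = 0; i < 4; i++)
		mess[i] ^= d[i];
	for (i = 0; i < 4; i++)
		mess[4 + i] ^= s[i];

	for (i = 0; i < 8; i++)
		printf("mess[%d] = %08x\n", i, (unsigned)ntohl(mess[i]));
	return 0;
}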
1362
1363static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001364 struct request_sock *req,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 struct dst_entry *dst)
1366{
Vegard Nossum78d15e82008-09-12 16:17:43 -07001367 struct inet6_request_sock *treq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1369 struct tcp6_sock *newtcp6sk;
1370 struct inet_sock *newinet;
1371 struct tcp_sock *newtp;
1372 struct sock *newsk;
1373 struct ipv6_txoptions *opt;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001374#ifdef CONFIG_TCP_MD5SIG
1375 struct tcp_md5sig_key *key;
1376#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377
1378 if (skb->protocol == htons(ETH_P_IP)) {
1379 /*
1380 * v6 mapped
1381 */
1382
1383 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1384
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001385 if (newsk == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386 return NULL;
1387
1388 newtcp6sk = (struct tcp6_sock *)newsk;
1389 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1390
1391 newinet = inet_sk(newsk);
1392 newnp = inet6_sk(newsk);
1393 newtp = tcp_sk(newsk);
1394
1395 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1396
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001397 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001399 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400
1401 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1402
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001403 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001405#ifdef CONFIG_TCP_MD5SIG
1406 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1407#endif
1408
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 newnp->pktoptions = NULL;
1410 newnp->opt = NULL;
Arnaldo Carvalho de Melo505cbfc2005-08-12 09:19:38 -03001411 newnp->mcast_oif = inet6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001412 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001414 /*
1415 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1416 * here, tcp_create_openreq_child now does this for us, see the comment in
1417 * that function for the gory details. -acme
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419
1420 /* This is a tricky place. Until this moment IPv4 tcp
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001421 worked with IPv6 icsk.icsk_af_ops.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 Sync it now.
1423 */
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001424 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425
1426 return newsk;
1427 }
1428
Vegard Nossum78d15e82008-09-12 16:17:43 -07001429 treq = inet6_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 opt = np->opt;
1431
1432 if (sk_acceptq_is_full(sk))
1433 goto out_overflow;
1434
David S. Miller493f3772010-12-02 12:14:29 -08001435 if (!dst) {
1436 dst = inet6_csk_route_req(sk, req);
1437 if (!dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 goto out;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001439 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440
1441 newsk = tcp_create_openreq_child(sk, req, skb);
1442 if (newsk == NULL)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001443 goto out_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001445 /*
1446 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1447 * count here, tcp_create_openreq_child now does this for us, see the
1448 * comment in that function for the gory details. -acme
1449 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450
Stephen Hemminger59eed272006-08-25 15:55:43 -07001451 newsk->sk_gso_type = SKB_GSO_TCPV6;
YOSHIFUJI Hideaki8e1ef0a2006-08-29 17:15:09 -07001452 __ip6_dst_store(newsk, dst, NULL, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453
1454 newtcp6sk = (struct tcp6_sock *)newsk;
1455 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1456
1457 newtp = tcp_sk(newsk);
1458 newinet = inet_sk(newsk);
1459 newnp = inet6_sk(newsk);
1460
1461 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1462
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001463 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1464 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1465 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1466 newsk->sk_bound_dev_if = treq->iif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001468 /* Now IPv6 options...
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469
1470 First: no IPv4 options.
1471 */
1472 newinet->opt = NULL;
Masayuki Nakagawad35690b2007-03-16 16:14:03 -07001473 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474
1475 /* Clone RX bits */
1476 newnp->rxopt.all = np->rxopt.all;
1477
1478 /* Clone pktoptions received with SYN */
1479 newnp->pktoptions = NULL;
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001480 if (treq->pktopts != NULL) {
1481 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1482 kfree_skb(treq->pktopts);
1483 treq->pktopts = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 if (newnp->pktoptions)
1485 skb_set_owner_r(newnp->pktoptions, newsk);
1486 }
1487 newnp->opt = NULL;
Arnaldo Carvalho de Melo505cbfc2005-08-12 09:19:38 -03001488 newnp->mcast_oif = inet6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001489 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490
1491 /* Clone native IPv6 options from listening socket (if any)
1492
1493 Yes, keeping a reference count would be much more clever,
1494 but we do one more thing here: reattach optmem
1495 to newsk.
1496 */
1497 if (opt) {
1498 newnp->opt = ipv6_dup_options(newsk, opt);
1499 if (opt != np->opt)
1500 sock_kfree_s(sk, opt, opt->tot_len);
1501 }
1502
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001503 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 if (newnp->opt)
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001505 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1506 newnp->opt->opt_flen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507
John Heffner5d424d52006-03-20 17:53:41 -08001508 tcp_mtup_init(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 tcp_sync_mss(newsk, dst_mtu(dst));
David S. Miller0dbaee32010-12-13 12:52:14 -08001510 newtp->advmss = dst_metric_advmss(dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 tcp_initialize_rcv_mss(newsk);
1512
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001513 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1514 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001516#ifdef CONFIG_TCP_MD5SIG
1517 /* Copy over the MD5 key from the original socket */
1518 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1519 /* We're using one, so create a matching key
1520 * on the newsk structure. If we fail to get
1521 * memory, then we end up not copying the key
1522 * across. Shucks.
1523 */
Arnaldo Carvalho de Meloaf879cc2006-11-17 12:14:37 -02001524 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1525 if (newkey != NULL)
John Dykstrae547bc12009-07-17 09:23:22 +00001526 tcp_v6_md5_do_add(newsk, &newnp->daddr,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001527 newkey, key->keylen);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001528 }
1529#endif
1530
Balazs Scheidler093d2822010-10-21 13:06:43 +02001531 if (__inet_inherit_port(sk, newsk) < 0) {
1532 sock_put(newsk);
1533 goto out;
1534 }
Eric Dumazet9327f702009-12-04 03:46:54 +00001535 __inet6_hash(newsk, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536
1537 return newsk;
1538
1539out_overflow:
Pavel Emelyanovde0744a2008-07-16 20:31:16 -07001540 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001541out_nonewsk:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 if (opt && opt != np->opt)
1543 sock_kfree_s(sk, opt, opt->tot_len);
1544 dst_release(dst);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001545out:
1546 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 return NULL;
1548}
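/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * the "v6 mapped" branch at the top of tcp_v6_syn_recv_sock() above handles
 * IPv4 clients that reach an IPv6 socket through v4-mapped addresses of the
 * form ::ffff:a.b.c.d.  The userspace snippet below only shows what such an
 * address looks like and how it is detected; it assumes nothing about the
 * kernel internals.
 */
#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr mapped;
	char buf[INET6_ADDRSTRLEN];

	/* ::ffff:192.0.2.1 is the v4-mapped form of 192.0.2.1 */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &mapped);
	inet_ntop(AF_INET6, &mapped, buf, sizeof(buf));
	printf("%s is %sa v4-mapped address\n", buf,
	       IN6_IS_ADDR_V4MAPPED(&mapped) ? "" : "not ");
	return 0;
}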
1549
Al Virob51655b2006-11-14 21:40:42 -08001550static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551{
Patrick McHardy84fa7932006-08-29 16:44:56 -07001552 if (skb->ip_summed == CHECKSUM_COMPLETE) {
Herbert Xu684f2172009-01-08 10:41:23 -08001553 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001554 &ipv6_hdr(skb)->daddr, skb->csum)) {
Herbert Xufb286bb2005-11-10 13:01:24 -08001555 skb->ip_summed = CHECKSUM_UNNECESSARY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 return 0;
Herbert Xufb286bb2005-11-10 13:01:24 -08001557 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 }
Herbert Xufb286bb2005-11-10 13:01:24 -08001559
Herbert Xu684f2172009-01-08 10:41:23 -08001560 skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001561 &ipv6_hdr(skb)->saddr,
1562 &ipv6_hdr(skb)->daddr, 0));
Herbert Xufb286bb2005-11-10 13:01:24 -08001563
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 if (skb->len <= 76) {
Herbert Xufb286bb2005-11-10 13:01:24 -08001565 return __skb_checksum_complete(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 }
1567 return 0;
1568}
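/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * tcp_v6_checksum_init() above verifies the TCP checksum against the IPv6
 * pseudo-header.  The userspace model below implements that sum as defined
 * in RFC 2460 section 8.1 (saddr, daddr, upper-layer length, next-header,
 * then the TCP segment); all names are local to this example.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* One's-complement sum over a byte buffer, treated as big-endian 16-bit words. */
static uint32_t csum_add(uint32_t sum, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte is padded with zero */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

static uint16_t tcp6_csum(const struct in6_addr *saddr,
			  const struct in6_addr *daddr,
			  const void *seg, uint32_t len)
{
	uint32_t sum = 0;
	uint32_t len_be = htonl(len);
	const uint8_t nexthdr[4] = { 0, 0, 0, IPPROTO_TCP };

	sum = csum_add(sum, saddr, sizeof(*saddr));
	sum = csum_add(sum, daddr, sizeof(*daddr));
	sum = csum_add(sum, &len_be, sizeof(len_be));
	sum = csum_add(sum, nexthdr, sizeof(nexthdr));
	sum = csum_add(sum, seg, len);
	while (sum >> 16)		/* fold carries back into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;		/* 0 means a received segment verified OK */
}

int main(void)
{
	struct in6_addr s, d;
	uint8_t seg[20] = { 0 };	/* dummy TCP header, checksum field zero */

	inet_pton(AF_INET6, "2001:db8::1", &s);
	inet_pton(AF_INET6, "2001:db8::2", &d);
	/* value to store in the header, high byte first */
	printf("checksum: %04x\n", tcp6_csum(&s, &d, seg, sizeof(seg)));
	return 0;
}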
1569
1570/* The socket must have its spinlock held when we get
1571 * here.
1572 *
1573 * We have a potential double-lock case here, so even when
1574 * doing backlog processing we use the BH locking scheme.
1575 * This is because we cannot sleep with the original spinlock
1576 * held.
1577 */
1578static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1579{
1580 struct ipv6_pinfo *np = inet6_sk(sk);
1581 struct tcp_sock *tp;
1582 struct sk_buff *opt_skb = NULL;
1583
1584 /* Imagine: socket is IPv6. IPv4 packet arrives,
1585 goes to the IPv4 receive handler and is backlogged.
1586 From the backlog it always goes here. Kerboom...
1587 Fortunately, tcp_rcv_established and rcv_established
1588 handle them correctly, but that is not the case with
1589 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1590 */
1591
1592 if (skb->protocol == htons(ETH_P_IP))
1593 return tcp_v4_do_rcv(sk, skb);
1594
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001595#ifdef CONFIG_TCP_MD5SIG
1596 if (tcp_v6_inbound_md5_hash(sk, skb))
1597 goto discard;
1598#endif
1599
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001600 if (sk_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 goto discard;
1602
1603 /*
1604 * socket locking is here for SMP purposes as backlog rcv
1605 * is currently called with bh processing disabled.
1606 */
1607
1608 /* Do Stevens' IPV6_PKTOPTIONS.
1609
1610 Yes, guys, it is the only place in our code where we
1611 can do this without affecting IPv4.
1612 The rest of the code is protocol independent,
1613 and I do not like the idea of uglifying IPv4.
1614
1615 Actually, the whole idea behind IPV6_PKTOPTIONS
1616 does not look very well thought out. For now we latch
1617 the options received in the last packet enqueued
1618 by tcp. Feel free to propose a better solution.
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001619 --ANK (980728)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 */
1621 if (np->rxopt.all)
1622 opt_skb = skb_clone(skb, GFP_ATOMIC);
1623
1624 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Neil Horman47482f12011-04-06 13:07:09 -07001625 sock_rps_save_rxhash(sk, skb->rxhash);
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001626 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 goto reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 if (opt_skb)
1629 goto ipv6_pktoptions;
1630 return 0;
1631 }
1632
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07001633 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 goto csum_err;
1635
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001636 if (sk->sk_state == TCP_LISTEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1638 if (!nsk)
1639 goto discard;
1640
1641 /*
1642 * Queue it on the new socket if the new socket is active,
1643 * otherwise we just shortcircuit this and continue with
1644 * the new socket..
1645 */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001646 if (nsk != sk) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 if (tcp_child_process(sk, nsk, skb))
1648 goto reset;
1649 if (opt_skb)
1650 __kfree_skb(opt_skb);
1651 return 0;
1652 }
Neil Horman47482f12011-04-06 13:07:09 -07001653 } else
1654 sock_rps_save_rxhash(sk, skb->rxhash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001656 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 goto reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 if (opt_skb)
1659 goto ipv6_pktoptions;
1660 return 0;
1661
1662reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001663 tcp_v6_send_reset(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664discard:
1665 if (opt_skb)
1666 __kfree_skb(opt_skb);
1667 kfree_skb(skb);
1668 return 0;
1669csum_err:
Pavel Emelyanov63231bd2008-07-16 20:22:25 -07001670 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 goto discard;
1672
1673
1674ipv6_pktoptions:
1675 /* Do you ask, what is it?
1676
1677 1. skb was enqueued by tcp.
1678 2. skb is added to tail of read queue, rather than out of order.
1679 3. socket is not in passive state.
1680 4. Finally, it really contains options, which user wants to receive.
1681 */
1682 tp = tcp_sk(sk);
1683 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1684 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001685 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
Arnaldo Carvalho de Melo505cbfc2005-08-12 09:19:38 -03001686 np->mcast_oif = inet6_iif(opt_skb);
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001687 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001688 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 if (ipv6_opt_accepted(sk, opt_skb)) {
1690 skb_set_owner_r(opt_skb, sk);
1691 opt_skb = xchg(&np->pktoptions, opt_skb);
1692 } else {
1693 __kfree_skb(opt_skb);
1694 opt_skb = xchg(&np->pktoptions, NULL);
1695 }
1696 }
1697
Wei Yongjun800d55f2009-02-23 21:45:33 +00001698 kfree_skb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 return 0;
1700}
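/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * the IPV6_PKTOPTIONS handling in tcp_v6_do_rcv() above only latches packet
 * options when the application has asked for them.  Userspace opts in with
 * the RFC 3542 socket options shown below; this fragment demonstrates only
 * the opt-in, not how the latched data is later read back.
 */
#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int on = 1;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* In this kernel version these correspond to np->rxopt.bits.rxinfo
	 * and np->rxopt.bits.rxhlim checked in the receive path above. */
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));
	close(fd);
	return 0;
}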
1701
Herbert Xue5bbef22007-10-15 12:50:28 -07001702static int tcp_v6_rcv(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703{
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001704 struct tcphdr *th;
Stephen Hemmingere802af92010-04-22 15:24:53 -07001705 struct ipv6hdr *hdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 struct sock *sk;
1707 int ret;
Pavel Emelyanova86b1e32008-07-16 20:20:58 -07001708 struct net *net = dev_net(skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
1710 if (skb->pkt_type != PACKET_HOST)
1711 goto discard_it;
1712
1713 /*
1714 * Count it even if it's bad.
1715 */
Pavel Emelyanov63231bd2008-07-16 20:22:25 -07001716 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717
1718 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1719 goto discard_it;
1720
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001721 th = tcp_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722
1723 if (th->doff < sizeof(struct tcphdr)/4)
1724 goto bad_packet;
1725 if (!pskb_may_pull(skb, th->doff*4))
1726 goto discard_it;
1727
Herbert Xu60476372007-04-09 11:59:39 -07001728 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 goto bad_packet;
1730
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001731 th = tcp_hdr(skb);
Stephen Hemmingere802af92010-04-22 15:24:53 -07001732 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1734 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1735 skb->len - th->doff*4);
1736 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1737 TCP_SKB_CB(skb)->when = 0;
Stephen Hemmingere802af92010-04-22 15:24:53 -07001738 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 TCP_SKB_CB(skb)->sacked = 0;
1740
Arnaldo Carvalho de Melo9a1f27c2008-10-07 11:41:57 -07001741 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 if (!sk)
1743 goto no_tcp_socket;
1744
1745process:
1746 if (sk->sk_state == TCP_TIME_WAIT)
1747 goto do_time_wait;
1748
Stephen Hemmingere802af92010-04-22 15:24:53 -07001749 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1750 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1751 goto discard_and_relse;
1752 }
1753
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1755 goto discard_and_relse;
1756
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001757 if (sk_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 goto discard_and_relse;
1759
1760 skb->dev = NULL;
1761
Fabio Olive Leite293b9c42006-09-25 22:28:47 -07001762 bh_lock_sock_nested(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 ret = 0;
1764 if (!sock_owned_by_user(sk)) {
Chris Leech1a2449a2006-05-23 18:05:53 -07001765#ifdef CONFIG_NET_DMA
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001766 struct tcp_sock *tp = tcp_sk(sk);
David S. Millerb4caea82007-10-26 04:20:13 -07001767 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
Dan Williamsf67b4592009-01-06 11:38:15 -07001768 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001769 if (tp->ucopy.dma_chan)
1770 ret = tcp_v6_do_rcv(sk, skb);
1771 else
Chris Leech1a2449a2006-05-23 18:05:53 -07001772#endif
1773 {
1774 if (!tcp_prequeue(sk, skb))
1775 ret = tcp_v6_do_rcv(sk, skb);
1776 }
Eric Dumazet6cce09f2010-03-07 23:21:57 +00001777 } else if (unlikely(sk_add_backlog(sk, skb))) {
Zhu Yi6b03a532010-03-04 18:01:41 +00001778 bh_unlock_sock(sk);
Eric Dumazet6cce09f2010-03-07 23:21:57 +00001779 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
Zhu Yi6b03a532010-03-04 18:01:41 +00001780 goto discard_and_relse;
1781 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 bh_unlock_sock(sk);
1783
1784 sock_put(sk);
1785 return ret ? -1 : 0;
1786
1787no_tcp_socket:
1788 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1789 goto discard_it;
1790
1791 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1792bad_packet:
Pavel Emelyanov63231bd2008-07-16 20:22:25 -07001793 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001795 tcp_v6_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 }
1797
1798discard_it:
1799
1800 /*
1801 * Discard frame
1802 */
1803
1804 kfree_skb(skb);
1805 return 0;
1806
1807discard_and_relse:
1808 sock_put(sk);
1809 goto discard_it;
1810
1811do_time_wait:
1812 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001813 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 goto discard_it;
1815 }
1816
1817 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
Pavel Emelyanov63231bd2008-07-16 20:22:25 -07001818 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001819 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 goto discard_it;
1821 }
1822
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001823 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 case TCP_TW_SYN:
1825 {
1826 struct sock *sk2;
1827
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001828 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001829 &ipv6_hdr(skb)->daddr,
Arnaldo Carvalho de Melo505cbfc2005-08-12 09:19:38 -03001830 ntohs(th->dest), inet6_iif(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 if (sk2 != NULL) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07001832 struct inet_timewait_sock *tw = inet_twsk(sk);
1833 inet_twsk_deschedule(tw, &tcp_death_row);
1834 inet_twsk_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 sk = sk2;
1836 goto process;
1837 }
1838 /* Fall through to ACK */
1839 }
1840 case TCP_TW_ACK:
1841 tcp_v6_timewait_ack(sk, skb);
1842 break;
1843 case TCP_TW_RST:
1844 goto no_tcp_socket;
1845 case TCP_TW_SUCCESS:;
1846 }
1847 goto discard_it;
1848}
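/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * tcp_v6_rcv() above computes end_seq = seq + SYN + FIN + payload length,
 * because SYN and FIN each consume one unit of sequence space.  A minimal
 * standalone model of that accounting:
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t tcp_end_seq(uint32_t seq, int syn, int fin,
			    uint32_t seg_len, uint32_t header_len)
{
	/* Sequence numbers wrap modulo 2^32; uint32_t arithmetic handles it. */
	return seq + syn + fin + (seg_len - header_len);
}

int main(void)
{
	/* A SYN with no payload covers exactly one sequence number. */
	printf("%u\n", tcp_end_seq(1000, 1, 0, 20, 20));	/* prints 1001 */
	/* 100 bytes of payload, no flags: end_seq = seq + 100. */
	printf("%u\n", tcp_end_seq(1000, 0, 0, 120, 20));	/* prints 1100 */
	return 0;
}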
1849
David S. Millerccb7c412010-12-01 18:09:13 -08001850static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851{
David S. Millerdb3949c2010-12-02 11:52:07 -08001852 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1853 struct ipv6_pinfo *np = inet6_sk(sk);
1854 struct inet_peer *peer;
1855
1856 if (!rt ||
1857 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1858 peer = inet_getpeer_v6(&np->daddr, 1);
1859 *release_it = true;
1860 } else {
1861 if (!rt->rt6i_peer)
1862 rt6_bind_peer(rt, 1);
1863 peer = rt->rt6i_peer;
David S. Miller457de432010-12-10 13:16:09 -08001864 *release_it = false;
David S. Millerdb3949c2010-12-02 11:52:07 -08001865 }
1866
1867 return peer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868}
1869
David S. Millerccb7c412010-12-01 18:09:13 -08001870static void *tcp_v6_tw_get_peer(struct sock *sk)
1871{
David S. Millerdb3949c2010-12-02 11:52:07 -08001872 struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
David S. Millerccb7c412010-12-01 18:09:13 -08001873 struct inet_timewait_sock *tw = inet_twsk(sk);
1874
1875 if (tw->tw_family == AF_INET)
1876 return tcp_v4_tw_get_peer(sk);
1877
David S. Millerdb3949c2010-12-02 11:52:07 -08001878 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
David S. Millerccb7c412010-12-01 18:09:13 -08001879}
1880
1881static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1882 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1883 .twsk_unique = tcp_twsk_unique,
1884 .twsk_destructor= tcp_twsk_destructor,
1885 .twsk_getpeer = tcp_v6_tw_get_peer,
1886};
1887
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001888static const struct inet_connection_sock_af_ops ipv6_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001889 .queue_xmit = inet6_csk_xmit,
1890 .send_check = tcp_v6_send_check,
1891 .rebuild_header = inet6_sk_rebuild_header,
1892 .conn_request = tcp_v6_conn_request,
1893 .syn_recv_sock = tcp_v6_syn_recv_sock,
David S. Miller3f419d22010-11-29 13:37:14 -08001894 .get_peer = tcp_v6_get_peer,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001895 .net_header_len = sizeof(struct ipv6hdr),
1896 .setsockopt = ipv6_setsockopt,
1897 .getsockopt = ipv6_getsockopt,
1898 .addr2sockaddr = inet6_csk_addr2sockaddr,
1899 .sockaddr_len = sizeof(struct sockaddr_in6),
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001900 .bind_conflict = inet6_csk_bind_conflict,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001901#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001902 .compat_setsockopt = compat_ipv6_setsockopt,
1903 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001904#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905};
1906
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001907#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001908static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001909 .md5_lookup = tcp_v6_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001910 .calc_md5_hash = tcp_v6_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001911 .md5_add = tcp_v6_md5_add_func,
1912 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001913};
David S. Millera9286302006-11-14 19:53:22 -08001914#endif
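/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * the md5_* hooks above implement the kernel side of RFC 2385 TCP MD5
 * signatures.  Userspace installs a key per peer with the TCP_MD5SIG socket
 * option, roughly as below; the address, key and minimal error handling are
 * placeholders chosen for the example.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/tcp.h>

int main(void)
{
	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
	struct sockaddr_in6 *peer = (struct sockaddr_in6 *)&md5.tcpm_addr;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	peer->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::2", &peer->sin6_addr);
	memcpy(md5.tcpm_key, "secret", 6);

	if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0)
		perror("TCP_MD5SIG");
	return 0;
}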
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001915
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916/*
1917 * TCP over IPv4 via INET6 API
1918 */
1919
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001920static const struct inet_connection_sock_af_ops ipv6_mapped = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001921 .queue_xmit = ip_queue_xmit,
1922 .send_check = tcp_v4_send_check,
1923 .rebuild_header = inet_sk_rebuild_header,
1924 .conn_request = tcp_v6_conn_request,
1925 .syn_recv_sock = tcp_v6_syn_recv_sock,
David S. Miller3f419d22010-11-29 13:37:14 -08001926 .get_peer = tcp_v4_get_peer,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001927 .net_header_len = sizeof(struct iphdr),
1928 .setsockopt = ipv6_setsockopt,
1929 .getsockopt = ipv6_getsockopt,
1930 .addr2sockaddr = inet6_csk_addr2sockaddr,
1931 .sockaddr_len = sizeof(struct sockaddr_in6),
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001932 .bind_conflict = inet6_csk_bind_conflict,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001933#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001934 .compat_setsockopt = compat_ipv6_setsockopt,
1935 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001936#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937};
1938
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001939#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3d2009-09-01 19:25:03 +00001940static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001941 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001942 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001943 .md5_add = tcp_v6_md5_add_func,
1944 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001945};
David S. Millera9286302006-11-14 19:53:22 -08001946#endif
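/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * ipv6_specific and ipv6_mapped above are const tables of function
 * pointers; the connection code installs one of them per socket, so the
 * same TCP core can emit either IPv6 or IPv4 packets.  A minimal
 * standalone model of that dispatch pattern, with invented demo names:
 */
#include <stdio.h>

struct af_ops {
	const char *name;
	int (*header_len)(void);
};

static int v6_hdr_len(void) { return 40; }	/* like sizeof(struct ipv6hdr) */
static int v4_hdr_len(void) { return 20; }	/* like sizeof(struct iphdr)  */

static const struct af_ops demo_v6     = { "native v6", v6_hdr_len };
static const struct af_ops demo_mapped = { "v4-mapped", v4_hdr_len };

int main(void)
{
	const struct af_ops *ops = &demo_mapped;	/* switched per socket */

	printf("%s: %d byte network header\n", ops->name, ops->header_len());
	return 0;
}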
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001947
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948/* NOTE: A lot of things are set to zero explicitly by the call to
1949 * sk_alloc(), so they need not be done here.
1950 */
1951static int tcp_v6_init_sock(struct sock *sk)
1952{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001953 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 struct tcp_sock *tp = tcp_sk(sk);
1955
1956 skb_queue_head_init(&tp->out_of_order_queue);
1957 tcp_init_xmit_timers(sk);
1958 tcp_prequeue_init(tp);
1959
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001960 icsk->icsk_rto = TCP_TIMEOUT_INIT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 tp->mdev = TCP_TIMEOUT_INIT;
1962
1963 /* So many TCP implementations out there (incorrectly) count the
1964 * initial SYN frame in their delayed-ACK and congestion control
1965 * algorithms that we must have the following bandaid to talk
1966 * efficiently to them. -DaveM
1967 */
1968 tp->snd_cwnd = 2;
1969
1970 /* See draft-stevens-tcpca-spec-01 for discussion of the
1971 * initialization of these values.
1972 */
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07001973 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 tp->snd_cwnd_clamp = ~0;
William Allen Simpsonbee7ca92009-11-10 09:51:18 +00001975 tp->mss_cache = TCP_MSS_DEFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
1977 tp->reordering = sysctl_tcp_reordering;
1978
1979 sk->sk_state = TCP_CLOSE;
1980
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001981 icsk->icsk_af_ops = &ipv6_specific;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001982 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001983 icsk->icsk_sync_mss = tcp_sync_mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 sk->sk_write_space = sk_stream_write_space;
1985 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1986
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001987#ifdef CONFIG_TCP_MD5SIG
1988 tp->af_specific = &tcp_sock_ipv6_specific;
1989#endif
1990
William Allen Simpson435cf552009-12-02 18:17:05 +00001991 /* TCP Cookie Transactions */
1992 if (sysctl_tcp_cookie_size > 0) {
1993 /* Default, cookies without s_data_payload. */
1994 tp->cookie_values =
1995 kzalloc(sizeof(*tp->cookie_values),
1996 sk->sk_allocation);
1997 if (tp->cookie_values != NULL)
1998 kref_init(&tp->cookie_values->kref);
1999 }
2000 /* Presumed zeroed, in order of appearance:
2001 * cookie_in_always, cookie_out_never,
2002 * s_data_constant, s_data_in, s_data_out
2003 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 sk->sk_sndbuf = sysctl_tcp_wmem[1];
2005 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2006
Herbert Xueb4dea52008-12-29 23:04:08 -08002007 local_bh_disable();
Eric Dumazet17483762008-11-25 21:16:35 -08002008 percpu_counter_inc(&tcp_sockets_allocated);
Herbert Xueb4dea52008-12-29 23:04:08 -08002009 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010
2011 return 0;
2012}
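/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * the initial snd_cwnd/ssthresh chosen in tcp_v6_init_sock() above can be
 * observed from userspace through the TCP_INFO socket option; the exact
 * values reported depend on the kernel version and socket state, so the
 * program below simply prints whatever the kernel returns.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

int main(void)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0 || getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) < 0) {
		perror("tcp_info");
		return 1;
	}
	printf("snd_cwnd=%u snd_ssthresh=%u rto=%uus\n",
	       info.tcpi_snd_cwnd, info.tcpi_snd_ssthresh, info.tcpi_rto);
	close(fd);
	return 0;
}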
2013
Brian Haley7d06b2e2008-06-14 17:04:49 -07002014static void tcp_v6_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002016#ifdef CONFIG_TCP_MD5SIG
2017 /* Clean up the MD5 key list */
2018 if (tcp_sk(sk)->md5sig_info)
2019 tcp_v6_clear_md5_list(sk);
2020#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 tcp_v4_destroy_sock(sk);
Brian Haley7d06b2e2008-06-14 17:04:49 -07002022 inet6_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023}
2024
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09002025#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026/* Proc filesystem TCPv6 sock list dumping. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09002027static void get_openreq6(struct seq_file *seq,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07002028 struct sock *sk, struct request_sock *req, int i, int uid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 int ttd = req->expires - jiffies;
Arnaldo Carvalho de Meloca304b62005-12-13 23:15:40 -08002031 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2032 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033
2034 if (ttd < 0)
2035 ttd = 0;
2036
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 seq_printf(seq,
2038 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2039 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2040 i,
2041 src->s6_addr32[0], src->s6_addr32[1],
2042 src->s6_addr32[2], src->s6_addr32[3],
KOVACS Krisztianfd507032008-10-19 23:35:58 -07002043 ntohs(inet_rsk(req)->loc_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 dest->s6_addr32[0], dest->s6_addr32[1],
2045 dest->s6_addr32[2], dest->s6_addr32[3],
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002046 ntohs(inet_rsk(req)->rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047 TCP_SYN_RECV,
2048 0,0, /* could print option size, but that is af dependent. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09002049 1, /* timers active (only the expire timer) */
2050 jiffies_to_clock_t(ttd),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 req->retrans,
2052 uid,
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09002053 0, /* non standard timer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 0, /* open_requests have no inode */
2055 0, req);
2056}
2057
2058static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2059{
2060 struct in6_addr *dest, *src;
2061 __u16 destp, srcp;
2062 int timer_active;
2063 unsigned long timer_expires;
2064 struct inet_sock *inet = inet_sk(sp);
2065 struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002066 const struct inet_connection_sock *icsk = inet_csk(sp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 struct ipv6_pinfo *np = inet6_sk(sp);
2068
2069 dest = &np->daddr;
2070 src = &np->rcv_saddr;
Eric Dumazetc720c7e2009-10-15 06:30:45 +00002071 destp = ntohs(inet->inet_dport);
2072 srcp = ntohs(inet->inet_sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002073
2074 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002076 timer_expires = icsk->icsk_timeout;
2077 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002079 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 } else if (timer_pending(&sp->sk_timer)) {
2081 timer_active = 2;
2082 timer_expires = sp->sk_timer.expires;
2083 } else {
2084 timer_active = 0;
2085 timer_expires = jiffies;
2086 }
2087
2088 seq_printf(seq,
2089 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Stephen Hemminger7be87352008-06-27 20:00:19 -07002090 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 i,
2092 src->s6_addr32[0], src->s6_addr32[1],
2093 src->s6_addr32[2], src->s6_addr32[3], srcp,
2094 dest->s6_addr32[0], dest->s6_addr32[1],
2095 dest->s6_addr32[2], dest->s6_addr32[3], destp,
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09002096 sp->sk_state,
Sridhar Samudrala47da8ee2006-06-27 13:29:00 -07002097 tp->write_seq-tp->snd_una,
2098 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 timer_active,
2100 jiffies_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002101 icsk->icsk_retransmits,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 sock_i_uid(sp),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002103 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 sock_i_ino(sp),
2105 atomic_read(&sp->sk_refcnt), sp,
Stephen Hemminger7be87352008-06-27 20:00:19 -07002106 jiffies_to_clock_t(icsk->icsk_rto),
2107 jiffies_to_clock_t(icsk->icsk_ack.ato),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002108 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07002109 tp->snd_cwnd,
2110 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 );
2112}
2113
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09002114static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002115 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116{
2117 struct in6_addr *dest, *src;
2118 __u16 destp, srcp;
Arnaldo Carvalho de Melo0fa1a532005-12-13 23:23:09 -08002119 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 int ttd = tw->tw_ttd - jiffies;
2121
2122 if (ttd < 0)
2123 ttd = 0;
2124
Arnaldo Carvalho de Melo0fa1a532005-12-13 23:23:09 -08002125 dest = &tw6->tw_v6_daddr;
2126 src = &tw6->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 destp = ntohs(tw->tw_dport);
2128 srcp = ntohs(tw->tw_sport);
2129
2130 seq_printf(seq,
2131 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2132 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2133 i,
2134 src->s6_addr32[0], src->s6_addr32[1],
2135 src->s6_addr32[2], src->s6_addr32[3], srcp,
2136 dest->s6_addr32[0], dest->s6_addr32[1],
2137 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2138 tw->tw_substate, 0, 0,
2139 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2140 atomic_read(&tw->tw_refcnt), tw);
2141}
2142
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143static int tcp6_seq_show(struct seq_file *seq, void *v)
2144{
2145 struct tcp_iter_state *st;
2146
2147 if (v == SEQ_START_TOKEN) {
2148 seq_puts(seq,
2149 " sl "
2150 "local_address "
2151 "remote_address "
2152 "st tx_queue rx_queue tr tm->when retrnsmt"
2153 " uid timeout inode\n");
2154 goto out;
2155 }
2156 st = seq->private;
2157
2158 switch (st->state) {
2159 case TCP_SEQ_STATE_LISTENING:
2160 case TCP_SEQ_STATE_ESTABLISHED:
2161 get_tcp6_sock(seq, v, st->num);
2162 break;
2163 case TCP_SEQ_STATE_OPENREQ:
2164 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2165 break;
2166 case TCP_SEQ_STATE_TIME_WAIT:
2167 get_timewait6_sock(seq, v, st->num);
2168 break;
2169 }
2170out:
2171 return 0;
2172}
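/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * get_tcp6_sock() above prints each IPv6 address as four %08X words taken
 * directly from s6_addr32[], i.e. in the CPU's native byte order.  A reader
 * of /proc/net/tcp6 therefore parses each 8-hex-digit group back into a
 * native 32-bit word, as in this minimal fragment (it assumes a
 * well-formed address field).
 */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	/* e.g. "0000...01000000" is ::1 when printed on a little-endian CPU */
	const char *field = "00000000000000000000000001000000";
	struct in6_addr addr;
	unsigned int w[4];
	char buf[INET6_ADDRSTRLEN];

	if (sscanf(field, "%8x%8x%8x%8x", &w[0], &w[1], &w[2], &w[3]) != 4)
		return 1;
	memcpy(&addr, w, sizeof(addr));	/* words are already in native order */
	inet_ntop(AF_INET6, &addr, buf, sizeof(buf));
	printf("%s\n", buf);
	return 0;
}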
2173
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 .name = "tcp6",
2176 .family = AF_INET6,
Denis V. Lunev5f4472c2008-04-13 22:13:53 -07002177 .seq_fops = {
2178 .owner = THIS_MODULE,
2179 },
Denis V. Lunev9427c4b2008-04-13 22:12:13 -07002180 .seq_ops = {
2181 .show = tcp6_seq_show,
2182 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183};
2184
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002185int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07002187 return tcp_proc_register(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188}
2189
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07002190void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07002192 tcp_proc_unregister(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193}
2194#endif
2195
2196struct proto tcpv6_prot = {
2197 .name = "TCPv6",
2198 .owner = THIS_MODULE,
2199 .close = tcp_close,
2200 .connect = tcp_v6_connect,
2201 .disconnect = tcp_disconnect,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002202 .accept = inet_csk_accept,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 .ioctl = tcp_ioctl,
2204 .init = tcp_v6_init_sock,
2205 .destroy = tcp_v6_destroy_sock,
2206 .shutdown = tcp_shutdown,
2207 .setsockopt = tcp_setsockopt,
2208 .getsockopt = tcp_getsockopt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 .recvmsg = tcp_recvmsg,
Changli Gao7ba42912010-07-10 20:41:55 +00002210 .sendmsg = tcp_sendmsg,
2211 .sendpage = tcp_sendpage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 .backlog_rcv = tcp_v6_do_rcv,
2213 .hash = tcp_v6_hash,
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08002214 .unhash = inet_unhash,
2215 .get_port = inet_csk_get_port,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 .enter_memory_pressure = tcp_enter_memory_pressure,
2217 .sockets_allocated = &tcp_sockets_allocated,
2218 .memory_allocated = &tcp_memory_allocated,
2219 .memory_pressure = &tcp_memory_pressure,
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07002220 .orphan_count = &tcp_orphan_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 .sysctl_mem = sysctl_tcp_mem,
2222 .sysctl_wmem = sysctl_tcp_wmem,
2223 .sysctl_rmem = sysctl_tcp_rmem,
2224 .max_header = MAX_TCP_HEADER,
2225 .obj_size = sizeof(struct tcp6_sock),
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002226 .slab_flags = SLAB_DESTROY_BY_RCU,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002227 .twsk_prot = &tcp6_timewait_sock_ops,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07002228 .rsk_prot = &tcp6_request_sock_ops,
Pavel Emelyanov39d8cda2008-03-22 16:50:58 -07002229 .h.hashinfo = &tcp_hashinfo,
Changli Gao7ba42912010-07-10 20:41:55 +00002230 .no_autobind = true,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002231#ifdef CONFIG_COMPAT
2232 .compat_setsockopt = compat_tcp_setsockopt,
2233 .compat_getsockopt = compat_tcp_getsockopt,
2234#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235};
2236
Alexey Dobriyan41135cc2009-09-14 12:22:28 +00002237static const struct inet6_protocol tcpv6_protocol = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 .handler = tcp_v6_rcv,
2239 .err_handler = tcp_v6_err,
Herbert Xua430a432006-07-08 13:34:56 -07002240 .gso_send_check = tcp_v6_gso_send_check,
Herbert Xuadcfc7d2006-06-30 13:36:15 -07002241 .gso_segment = tcp_tso_segment,
Herbert Xu684f2172009-01-08 10:41:23 -08002242 .gro_receive = tcp6_gro_receive,
2243 .gro_complete = tcp6_gro_complete,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2245};
2246
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247static struct inet_protosw tcpv6_protosw = {
2248 .type = SOCK_STREAM,
2249 .protocol = IPPROTO_TCP,
2250 .prot = &tcpv6_prot,
2251 .ops = &inet6_stream_ops,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 .no_check = 0,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08002253 .flags = INET_PROTOSW_PERMANENT |
2254 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255};
2256
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002257static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002258{
Denis V. Lunev56772422008-04-03 14:28:30 -07002259 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2260 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002261}
2262
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002263static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002264{
Denis V. Lunev56772422008-04-03 14:28:30 -07002265 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002266}
2267
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002268static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002269{
2270 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002271}
2272
2273static struct pernet_operations tcpv6_net_ops = {
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002274 .init = tcpv6_net_init,
2275 .exit = tcpv6_net_exit,
2276 .exit_batch = tcpv6_net_exit_batch,
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002277};
2278
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002279int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002281 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08002282
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002283 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2284 if (ret)
2285 goto out;
2286
2287 /* register inet6 protocol */
2288 ret = inet6_register_protosw(&tcpv6_protosw);
2289 if (ret)
2290 goto out_tcpv6_protocol;
2291
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002292 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002293 if (ret)
2294 goto out_tcpv6_protosw;
2295out:
2296 return ret;
2297
2298out_tcpv6_protosw:
2299 inet6_unregister_protosw(&tcpv6_protosw);
2300out_tcpv6_protocol:
2301 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2302 goto out;
2303}
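/*
 * Illustrative sketch (editorial addition, not part of the kernel source):
 * tcpv6_init() above registers three facilities and, on failure, must
 * unwind only what already succeeded, in reverse order of registration.
 * A minimal standalone model of that goto-unwind pattern:
 */
#include <stdio.h>

static int step(const char *what, int fail)
{
	printf("+%s\n", what);
	return fail ? -1 : 0;
}

static void undo(const char *what)
{
	printf("-%s\n", what);
}

static int demo_init(void)
{
	int ret;

	ret = step("protocol", 0);
	if (ret)
		goto out;
	ret = step("protosw", 0);
	if (ret)
		goto out_protocol;
	ret = step("pernet", 1);	/* simulate a late failure */
	if (ret)
		goto out_protosw;
out:
	return ret;

out_protosw:
	undo("protosw");
out_protocol:
	undo("protocol");
	goto out;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}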
2304
Daniel Lezcano09f77092007-12-13 05:34:58 -08002305void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002306{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002307 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002308 inet6_unregister_protosw(&tcpv6_protosw);
2309 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310}