/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static void tcp_v6_send_reset(struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
			      struct sk_buff *skb);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok);

static struct tcp_func ipv6_mapped;
static struct tcp_func ipv6_specific;

int inet6_csk_bind_conflict(const struct sock *sk,
			    const struct inet_bind_bucket *tb)
{
	const struct sock *sk2;
	const struct hlist_node *node;

	/* We must walk the whole port owner list in this case. -DaveM */
	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
		    (!sk->sk_reuse || !sk2->sk_reuse ||
		     sk2->sk_state == TCP_LISTEN) &&
		    ipv6_rcv_saddr_equal(sk, sk2))
			break;
	}

	return node != NULL;
}

static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}

static __inline__ void __tcp_v6_hash(struct sock *sk)
{
	struct hlist_head *list;
	rwlock_t *lock;

	BUG_TRAP(sk_unhashed(sk));

	if (sk->sk_state == TCP_LISTEN) {
		list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
		lock = &tcp_hashinfo.lhash_lock;
		inet_listen_wlock(&tcp_hashinfo);
	} else {
		unsigned int hash;
		sk->sk_hash = hash = inet6_sk_ehashfn(sk);
		hash &= (tcp_hashinfo.ehash_size - 1);
		list = &tcp_hashinfo.ehash[hash].chain;
		lock = &tcp_hashinfo.ehash[hash].lock;
		write_lock(lock);
	}

	__sk_add_node(sk, list);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(lock);
}


static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->af_specific == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__tcp_v6_hash(sk);
		local_bh_enable();
	}
}

/*
 * Open request hash tables.
 */

static u32 tcp_v6_synq_hash(const struct in6_addr *raddr, const u16 rport, const u32 rnd)
{
	u32 a, b, c;

	a = raddr->s6_addr32[0];
	b = raddr->s6_addr32[1];
	c = raddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += rnd;
	__jhash_mix(a, b, c);

	a += raddr->s6_addr32[3];
	b += (u32) rport;
	__jhash_mix(a, b, c);

	return c & (TCP_SYNQ_HSIZE - 1);
}

static struct request_sock *tcp_v6_search_req(const struct sock *sk,
					      struct request_sock ***prevp,
					      __u16 rport,
					      struct in6_addr *raddr,
					      struct in6_addr *laddr,
					      int iif)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct tcp6_request_sock *treq = tcp6_rsk(req);

		if (inet_rsk(req)->rmt_port == rport &&
		    req->rsk_ops->family == AF_INET6 &&
		    ipv6_addr_equal(&treq->rmt_addr, raddr) &&
		    ipv6_addr_equal(&treq->loc_addr, laddr) &&
		    (!treq->iif || treq->iif == iif)) {
			BUG_TRAP(req->sk == NULL);
			*prevp = prev;
			return req;
		}
	}

	return NULL;
}

static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   unsigned long base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IPV6)) {
		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
						    skb->nh.ipv6h->saddr.s6_addr32,
						    skb->h.th->dest,
						    skb->h.th->source);
	} else {
		return secure_tcp_sequence_number(skb->nh.iph->daddr,
						  skb->nh.iph->saddr,
						  skb->h.th->dest,
						  skb->h.th->source);
	}
}

static int __tcp_v6_check_established(struct sock *sk, const __u16 lport,
				      struct inet_timewait_sock **twp)
{
	struct inet_sock *inet = inet_sk(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	const struct in6_addr *daddr = &np->rcv_saddr;
	const struct in6_addr *saddr = &np->daddr;
	const int dif = sk->sk_bound_dev_if;
	const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
	unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash);
	struct sock *sk2;
	const struct hlist_node *node;
	struct inet_timewait_sock *tw;

	prefetch(head->chain.first);
	write_lock(&head->lock);

	/* Check TIME-WAIT sockets first. */
	sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
		const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk2);

		tw = inet_twsk(sk2);

		if(*((__u32 *)&(tw->tw_dport)) == ports &&
		   sk2->sk_family == PF_INET6 &&
		   ipv6_addr_equal(&tcp6tw->tw_v6_daddr, saddr) &&
		   ipv6_addr_equal(&tcp6tw->tw_v6_rcv_saddr, daddr) &&
		   sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
			const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
			struct tcp_sock *tp = tcp_sk(sk);

			if (tcptw->tw_ts_recent_stamp &&
			    (!twp ||
			     (sysctl_tcp_tw_reuse &&
			      xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
				/* See comment in tcp_ipv4.c */
				tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
				if (!tp->write_seq)
					tp->write_seq = 1;
				tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
				tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
				sock_hold(sk2);
				goto unique;
			} else
				goto not_unique;
		}
	}
	tw = NULL;

	/* And established part... */
	sk_for_each(sk2, node, &head->chain) {
		if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif))
			goto not_unique;
	}

unique:
	BUG_TRAP(sk_unhashed(sk));
	__sk_add_node(sk, &head->chain);
	sk->sk_hash = hash;
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(&head->lock);

	if (twp) {
		*twp = tw;
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, &tcp_death_row);
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);

		inet_twsk_put(tw);
	}
	return 0;

not_unique:
	write_unlock(&head->lock);
	return -EADDRNOTAVAIL;
}

static inline u32 tcpv6_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);

	return secure_tcpv6_port_ephemeral(np->rcv_saddr.s6_addr32,
					   np->daddr.s6_addr32,
					   inet->dport);
}

static int tcp_v6_hash_connect(struct sock *sk)
{
	unsigned short snum = inet_sk(sk)->num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;

	if (!snum) {
		int low = sysctl_local_port_range[0];
		int high = sysctl_local_port_range[1];
		int range = high - low;
		int i;
		int port;
		static u32 hint;
		u32 offset = hint + tcpv6_port_offset(sk);
		struct hlist_node *node;
		struct inet_timewait_sock *tw = NULL;

		local_bh_disable();
		for (i = 1; i <= range; i++) {
			port = low + (i + offset) % range;
			head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, node, &head->chain) {
				if (tb->port == port) {
					BUG_TRAP(!hlist_empty(&tb->owners));
					if (tb->fastreuse >= 0)
						goto next_port;
					if (!__tcp_v6_check_established(sk,
									port,
									&tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += i;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->sport = htons(port);
			__tcp_v6_hash(sk);
		}
		spin_unlock(&head->lock);

		if (tw) {
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
		}

		ret = 0;
		goto out;
	}

	head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
	tb   = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);

	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		__tcp_v6_hash(sk);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = __tcp_v6_check_established(sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return(-EAFNOSUPPORT);

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if(ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if(addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = tp->ext_header_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		tp->af_specific = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			tp->ext_header_len = exthdrlen;
			tp->af_specific = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto failure;

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	ip6_dst_store(sk, dst, NULL);
	sk->sk_route_caps = dst->dev->features &
		~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	tp->ext_header_len = 0;
	if (np->opt)
		tp->ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = tcp_v6_hash_connect(sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __u32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
			  th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (tp->pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = tcp_v6_search_req(sk, &prev, th->dest, &hdr->daddr,
					&hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	struct tcp6_request_sock *treq = tcp6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    treq->pktopts) {
			struct sk_buff *pktopts = treq->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);
			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
		}

		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (tcp6_rsk(req)->pktopts)
		kfree_skb(tcp6_rsk(req)->pktopts);
}

static struct request_sock_ops tcp6_request_sock_ops = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

static int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (np->rxopt.all) {
		if ((opt->hop && (np->rxopt.bits.hopopts || np->rxopt.bits.ohopopts)) ||
		    ((IPV6_FLOWINFO_MASK & *(u32*)skb->nh.raw) && np->rxopt.bits.rxflow) ||
		    (opt->srcrt && (np->rxopt.bits.srcrt || np->rxopt.bits.osrcrt)) ||
		    ((opt->dst1 || opt->dst0) && (np->rxopt.bits.dstopts || np->rxopt.bits.odstopts)))
			return 1;
	}
	return 0;
}


static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
			      struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff<<2,
							 skb->csum));
	}
}


static void tcp_v6_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

	t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1)/4;
	t1->rst = 1;

	if(th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff<<2));
	}

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {

		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(NULL, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);

	if (ts)
		tot_len += 3*4;

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len/4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	if (ts) {
		u32 *ptr = (u32*)(t1 + 1);
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tcp_time_stamp);
		*ptr = htonl(ts);
	}

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(NULL, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
}


static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = skb->h.th;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = tcp_v6_search_req(sk, &prev, th->source, &skb->nh.ipv6h->saddr,
				&skb->nh.ipv6h->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
					 th->source, &skb->nh.ipv6h->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

#if 0 /*def CONFIG_SYN_COOKIES*/
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT);
	inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
}


/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/*
	 *	There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = tcp6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
	TCP_ECN_create_request(req, skb->h.th);
	treq->pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (isn == 0)
		isn = tcp_v6_init_sequence(sk,skb);

	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v6_send_synack(sk, req, NULL))
		goto drop;

	tcp_v6_synq_add(sk, req);

	return 0;

drop:
	if (req)
		reqsk_free(req);

	TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
	return 0; /* don't send reset */
}

static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct tcp6_request_sock *treq = tcp6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		newtp->af_specific = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 af_tcp.af_specific.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, newtp->pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (np->rxopt.bits.osrcrt == 2 &&
	    opt == NULL && treq->pktopts) {
		struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
	}

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	ip6_dst_store(newsk, dst, NULL);
	newsk->sk_route_caps = dst->dev->features &
		~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	newtp->ext_header_len = 0;
	if (newnp->opt)
		newtp->ext_header_len = newnp->opt->opt_nflen +
					newnp->opt->opt_flen;

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__tcp_v6_hash(newsk);
	inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

static int tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr,skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb, 0))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if(nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}

static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
{
	struct sk_buff *skb = *pskb;
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v6_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
			    &skb->nh.ipv6h->daddr, ntohs(th->dest),
			    inet6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
					   skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(&tcp_hashinfo,
					    &skb->nh.ipv6h->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

1595static int tcp_v6_rebuild_header(struct sock *sk)
1596{
1597 int err;
1598 struct dst_entry *dst;
1599 struct ipv6_pinfo *np = inet6_sk(sk);
1600
1601 dst = __sk_dst_check(sk, np->dst_cookie);
1602
1603 if (dst == NULL) {
1604 struct inet_sock *inet = inet_sk(sk);
1605 struct in6_addr *final_p = NULL, final;
1606 struct flowi fl;
1607
1608 memset(&fl, 0, sizeof(fl));
1609 fl.proto = IPPROTO_TCP;
1610 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1611 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
1612 fl.fl6_flowlabel = np->flow_label;
1613 fl.oif = sk->sk_bound_dev_if;
1614 fl.fl_ip_dport = inet->dport;
1615 fl.fl_ip_sport = inet->sport;
1616
1617 if (np->opt && np->opt->srcrt) {
1618 struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
1619 ipv6_addr_copy(&final, &fl.fl6_dst);
1620 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1621 final_p = &final;
1622 }
1623
1624 err = ip6_dst_lookup(sk, &dst, &fl);
1625 if (err) {
1626 sk->sk_route_caps = 0;
1627 return err;
1628 }
1629 if (final_p)
1630 ipv6_addr_copy(&fl.fl6_dst, final_p);
1631
1632 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
1633 sk->sk_err_soft = -err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 return err;
1635 }
1636
1637 ip6_dst_store(sk, dst, NULL);
1638 sk->sk_route_caps = dst->dev->features &
1639 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1640 }
1641
1642 return 0;
1643}
1644
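/*
 * Transmit one TCP segment over IPv6: build the flow from the socket
 * state, reuse the cached dst while np->dst_cookie still matches,
 * otherwise route (and run the result through xfrm_lookup() for IPsec)
 * and cache the new dst, then hand the skb to ip6_xmit() with the
 * socket's IPv6 options attached.
 */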
1645static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok)
1646{
1647 struct sock *sk = skb->sk;
1648 struct inet_sock *inet = inet_sk(sk);
1649 struct ipv6_pinfo *np = inet6_sk(sk);
1650 struct flowi fl;
1651 struct dst_entry *dst;
1652 struct in6_addr *final_p = NULL, final;
1653
1654 memset(&fl, 0, sizeof(fl));
1655 fl.proto = IPPROTO_TCP;
1656 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1657 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
1658 fl.fl6_flowlabel = np->flow_label;
1659 IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
1660 fl.oif = sk->sk_bound_dev_if;
1661 fl.fl_ip_sport = inet->sport;
1662 fl.fl_ip_dport = inet->dport;
1663
1664 if (np->opt && np->opt->srcrt) {
1665 struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
1666 ipv6_addr_copy(&final, &fl.fl6_dst);
1667 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1668 final_p = &final;
1669 }
1670
1671 dst = __sk_dst_check(sk, np->dst_cookie);
1672
1673 if (dst == NULL) {
1674 int err = ip6_dst_lookup(sk, &dst, &fl);
1675
1676 if (err) {
1677 sk->sk_err_soft = -err;
1678 return err;
1679 }
1680
1681 if (final_p)
1682 ipv6_addr_copy(&fl.fl6_dst, final_p);
1683
1684 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
1685 sk->sk_route_caps = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 return err;
1687 }
1688
1689 ip6_dst_store(sk, dst, NULL);
1690 sk->sk_route_caps = dst->dev->features &
1691 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1692 }
1693
1694 skb->dst = dst_clone(dst);
1695
1696	/* Restore the final destination now that routing is done. */
1697 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1698
1699 return ip6_xmit(sk, skb, &fl, np->opt, 0);
1700}
1701
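/*
 * Fill in a sockaddr_in6 for the peer, as returned by getpeername() and
 * accept(): the received flow label is not reported for TCP, and
 * sin6_scope_id is only set for link-local peers, where it carries the
 * bound interface index.
 */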
1702static void v6_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
1703{
1704 struct ipv6_pinfo *np = inet6_sk(sk);
1705 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
1706
1707 sin6->sin6_family = AF_INET6;
1708 ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
1709 sin6->sin6_port = inet_sk(sk)->dport;
1710 /* We do not store received flowlabel for TCP */
1711 sin6->sin6_flowinfo = 0;
1712 sin6->sin6_scope_id = 0;
1713 if (sk->sk_bound_dev_if &&
1714 ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
1715 sin6->sin6_scope_id = sk->sk_bound_dev_if;
1716}
1717
1718static int tcp_v6_remember_stamp(struct sock *sk)
1719{
1720 /* Alas, not yet... */
1721 return 0;
1722}
1723
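/*
 * Address-family specific operations used for native IPv6 peers; the
 * ipv6_mapped table below is switched in when the peer address is
 * IPv4-mapped, so the same socket can drive a plain IPv4 datapath.
 */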
1724static struct tcp_func ipv6_specific = {
1725 .queue_xmit = tcp_v6_xmit,
1726 .send_check = tcp_v6_send_check,
1727 .rebuild_header = tcp_v6_rebuild_header,
1728 .conn_request = tcp_v6_conn_request,
1729 .syn_recv_sock = tcp_v6_syn_recv_sock,
1730 .remember_stamp = tcp_v6_remember_stamp,
1731 .net_header_len = sizeof(struct ipv6hdr),
1732
1733 .setsockopt = ipv6_setsockopt,
1734 .getsockopt = ipv6_getsockopt,
1735 .addr2sockaddr = v6_addr2sockaddr,
1736 .sockaddr_len = sizeof(struct sockaddr_in6)
1737};
1738
1739/*
1740 * TCP over IPv4 via INET6 API
1741 */
1742
1743static struct tcp_func ipv6_mapped = {
1744 .queue_xmit = ip_queue_xmit,
1745 .send_check = tcp_v4_send_check,
Arnaldo Carvalho de Melo32519f12005-08-09 19:50:02 -07001746 .rebuild_header = inet_sk_rebuild_header,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 .conn_request = tcp_v6_conn_request,
1748 .syn_recv_sock = tcp_v6_syn_recv_sock,
1749 .remember_stamp = tcp_v4_remember_stamp,
1750 .net_header_len = sizeof(struct iphdr),
1751
1752 .setsockopt = ipv6_setsockopt,
1753 .getsockopt = ipv6_getsockopt,
1754 .addr2sockaddr = v6_addr2sockaddr,
1755 .sockaddr_len = sizeof(struct sockaddr_in6)
1756};
1757
1758
1759
1760/* NOTE: A lot of things are set to zero explicitly by the call to
1761 * sk_alloc(), so they need not be done here.
1762 */
1763static int tcp_v6_init_sock(struct sock *sk)
1764{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001765 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 struct tcp_sock *tp = tcp_sk(sk);
1767
1768 skb_queue_head_init(&tp->out_of_order_queue);
1769 tcp_init_xmit_timers(sk);
1770 tcp_prequeue_init(tp);
1771
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001772 icsk->icsk_rto = TCP_TIMEOUT_INIT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 tp->mdev = TCP_TIMEOUT_INIT;
1774
1775 /* So many TCP implementations out there (incorrectly) count the
1776 * initial SYN frame in their delayed-ACK and congestion control
1777 * algorithms that we must have the following bandaid to talk
1778 * efficiently to them. -DaveM
1779 */
1780 tp->snd_cwnd = 2;
1781
1782 /* See draft-stevens-tcpca-spec-01 for discussion of the
1783 * initialization of these values.
1784 */
1785 tp->snd_ssthresh = 0x7fffffff;
1786 tp->snd_cwnd_clamp = ~0;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001787 tp->mss_cache = 536;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
1789 tp->reordering = sysctl_tcp_reordering;
1790
1791 sk->sk_state = TCP_CLOSE;
1792
1793 tp->af_specific = &ipv6_specific;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001794 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 sk->sk_write_space = sk_stream_write_space;
1796 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1797
1798 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1799 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1800
1801 atomic_inc(&tcp_sockets_allocated);
1802
1803 return 0;
1804}
1805
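/*
 * Tearing down a v6 TCP socket reuses the IPv4 TCP cleanup and then
 * releases the IPv6-specific state via inet6_destroy_sock().
 */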
1806static int tcp_v6_destroy_sock(struct sock *sk)
1807{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 tcp_v4_destroy_sock(sk);
1809 return inet6_destroy_sock(sk);
1810}
1811
1812/* Proc filesystem TCPv6 sock list dumping. */
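/*
 * get_openreq6() prints one embryonic (SYN_RECV) connection request
 * owned by a listening socket: the local port comes from the listener,
 * the remote address and port from the request itself.
 */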
1813static void get_openreq6(struct seq_file *seq,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001814 struct sock *sk, struct request_sock *req, int i, int uid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815{
1816 struct in6_addr *dest, *src;
1817 int ttd = req->expires - jiffies;
1818
1819 if (ttd < 0)
1820 ttd = 0;
1821
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001822 src = &tcp6_rsk(req)->loc_addr;
1823 dest = &tcp6_rsk(req)->rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 seq_printf(seq,
1825 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1826 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1827 i,
1828 src->s6_addr32[0], src->s6_addr32[1],
1829 src->s6_addr32[2], src->s6_addr32[3],
1830 ntohs(inet_sk(sk)->sport),
1831 dest->s6_addr32[0], dest->s6_addr32[1],
1832 dest->s6_addr32[2], dest->s6_addr32[3],
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001833 ntohs(inet_rsk(req)->rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 TCP_SYN_RECV,
1835 0,0, /* could print option size, but that is af dependent. */
1836 1, /* timers active (only the expire timer) */
1837 jiffies_to_clock_t(ttd),
1838 req->retrans,
1839 uid,
1840 0, /* non standard timer */
1841 0, /* open_requests have no inode */
1842 0, req);
1843}
1844
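/*
 * Format one established or listening socket as a /proc/net/tcp6 row.
 * The timer column encodes which timer is pending: 1 retransmit,
 * 2 keepalive (sk_timer), 4 zero-window probe, 0 none.
 */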
1845static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1846{
1847 struct in6_addr *dest, *src;
1848 __u16 destp, srcp;
1849 int timer_active;
1850 unsigned long timer_expires;
1851 struct inet_sock *inet = inet_sk(sp);
1852 struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001853 const struct inet_connection_sock *icsk = inet_csk(sp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 struct ipv6_pinfo *np = inet6_sk(sp);
1855
1856 dest = &np->daddr;
1857 src = &np->rcv_saddr;
1858 destp = ntohs(inet->dport);
1859 srcp = ntohs(inet->sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001860
1861 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001863 timer_expires = icsk->icsk_timeout;
1864 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001866 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 } else if (timer_pending(&sp->sk_timer)) {
1868 timer_active = 2;
1869 timer_expires = sp->sk_timer.expires;
1870 } else {
1871 timer_active = 0;
1872 timer_expires = jiffies;
1873 }
1874
1875 seq_printf(seq,
1876 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1877 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
1878 i,
1879 src->s6_addr32[0], src->s6_addr32[1],
1880 src->s6_addr32[2], src->s6_addr32[3], srcp,
1881 dest->s6_addr32[0], dest->s6_addr32[1],
1882 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1883 sp->sk_state,
1884 tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
1885 timer_active,
1886 jiffies_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001887 icsk->icsk_retransmits,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 sock_i_uid(sp),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001889 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 sock_i_ino(sp),
1891 atomic_read(&sp->sk_refcnt), sp,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001892 icsk->icsk_rto,
1893 icsk->icsk_ack.ato,
1894 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
1896 );
1897}
1898
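/*
 * Format one TIME_WAIT socket: columns that only apply to a full socket
 * (queue sizes, uid, inode) are printed as zero and the timer column
 * shows the remaining timewait lifetime.
 */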
1899static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001900 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901{
1902 struct in6_addr *dest, *src;
1903 __u16 destp, srcp;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001904 struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 int ttd = tw->tw_ttd - jiffies;
1906
1907 if (ttd < 0)
1908 ttd = 0;
1909
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001910 dest = &tcp6tw->tw_v6_daddr;
1911 src = &tcp6tw->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 destp = ntohs(tw->tw_dport);
1913 srcp = ntohs(tw->tw_sport);
1914
1915 seq_printf(seq,
1916 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1917 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1918 i,
1919 src->s6_addr32[0], src->s6_addr32[1],
1920 src->s6_addr32[2], src->s6_addr32[3], srcp,
1921 dest->s6_addr32[0], dest->s6_addr32[1],
1922 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1923 tw->tw_substate, 0, 0,
1924 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1925 atomic_read(&tw->tw_refcnt), tw);
1926}
1927
1928#ifdef CONFIG_PROC_FS
1929static int tcp6_seq_show(struct seq_file *seq, void *v)
1930{
1931 struct tcp_iter_state *st;
1932
1933 if (v == SEQ_START_TOKEN) {
1934 seq_puts(seq,
1935 " sl "
1936 "local_address "
1937 "remote_address "
1938 "st tx_queue rx_queue tr tm->when retrnsmt"
1939 " uid timeout inode\n");
1940 goto out;
1941 }
1942 st = seq->private;
1943
1944 switch (st->state) {
1945 case TCP_SEQ_STATE_LISTENING:
1946 case TCP_SEQ_STATE_ESTABLISHED:
1947 get_tcp6_sock(seq, v, st->num);
1948 break;
1949 case TCP_SEQ_STATE_OPENREQ:
1950 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1951 break;
1952 case TCP_SEQ_STATE_TIME_WAIT:
1953 get_timewait6_sock(seq, v, st->num);
1954 break;
1955 }
1956out:
1957 return 0;
1958}
1959
1960static struct file_operations tcp6_seq_fops;
1961static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1962 .owner = THIS_MODULE,
1963 .name = "tcp6",
1964 .family = AF_INET6,
1965 .seq_show = tcp6_seq_show,
1966 .seq_fops = &tcp6_seq_fops,
1967};
1968
1969int __init tcp6_proc_init(void)
1970{
1971 return tcp_proc_register(&tcp6_seq_afinfo);
1972}
1973
1974void tcp6_proc_exit(void)
1975{
1976 tcp_proc_unregister(&tcp6_seq_afinfo);
1977}
1978#endif
1979
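/*
 * The TCPv6 protocol descriptor: most operations are shared with IPv4
 * TCP, and only the pieces that touch addressing, routing or the hash
 * tables (connect, init/destroy, backlog_rcv, hash, get_port) have v6
 * variants.
 */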
1980struct proto tcpv6_prot = {
1981 .name = "TCPv6",
1982 .owner = THIS_MODULE,
1983 .close = tcp_close,
1984 .connect = tcp_v6_connect,
1985 .disconnect = tcp_disconnect,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001986 .accept = inet_csk_accept,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 .ioctl = tcp_ioctl,
1988 .init = tcp_v6_init_sock,
1989 .destroy = tcp_v6_destroy_sock,
1990 .shutdown = tcp_shutdown,
1991 .setsockopt = tcp_setsockopt,
1992 .getsockopt = tcp_getsockopt,
1993 .sendmsg = tcp_sendmsg,
1994 .recvmsg = tcp_recvmsg,
1995 .backlog_rcv = tcp_v6_do_rcv,
1996 .hash = tcp_v6_hash,
1997 .unhash = tcp_unhash,
1998 .get_port = tcp_v6_get_port,
1999 .enter_memory_pressure = tcp_enter_memory_pressure,
2000 .sockets_allocated = &tcp_sockets_allocated,
2001 .memory_allocated = &tcp_memory_allocated,
2002 .memory_pressure = &tcp_memory_pressure,
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07002003 .orphan_count = &tcp_orphan_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 .sysctl_mem = sysctl_tcp_mem,
2005 .sysctl_wmem = sysctl_tcp_wmem,
2006 .sysctl_rmem = sysctl_tcp_rmem,
2007 .max_header = MAX_TCP_HEADER,
2008 .obj_size = sizeof(struct tcp6_sock),
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002009 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07002010 .rsk_prot = &tcp6_request_sock_ops,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011};
2012
2013static struct inet6_protocol tcpv6_protocol = {
2014 .handler = tcp_v6_rcv,
2015 .err_handler = tcp_v6_err,
2016 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2017};
2018
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019static struct inet_protosw tcpv6_protosw = {
2020 .type = SOCK_STREAM,
2021 .protocol = IPPROTO_TCP,
2022 .prot = &tcpv6_prot,
2023 .ops = &inet6_stream_ops,
2024 .capability = -1,
2025 .no_check = 0,
2026 .flags = INET_PROTOSW_PERMANENT,
2027};
2028
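/*
 * Boot-time registration: hook TCP into the IPv6 protocol demultiplexer
 * and expose the SOCK_STREAM/IPPROTO_TCP switch entry to the inet6
 * socket layer.
 */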
2029void __init tcpv6_init(void)
2030{
2031 /* register inet6 protocol */
2032 if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
2033 printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
2034 inet6_register_protosw(&tcpv6_protosw);
2035}