/*
 *	IPv6 over IPv6 tunnel device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *
 *	$Id$
 *
 *	Based on:
 *	linux/net/ipv6/sit.c
 *
 *	RFC 2473
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6-in-IPv6 tunnel");
MODULE_LICENSE("GPL");

#define IPV6_TLV_TEL_DST_SIZE 8

#ifdef IP6_TNL_DEBUG
#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __FUNCTION__)
#else
#define IP6_TNL_TRACE(x...) do {;} while(0)
#endif

#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)

#define HASH_SIZE  32

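/*
 * Hash an IPv6 address by folding its four 32-bit words together and
 * masking the result to the table size.  Tunnel lookup combines
 * HASH(remote) ^ HASH(local) to select a bucket in tnls_r_l[].
 */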
#define HASH(addr) (((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \
		     (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
		    (HASH_SIZE - 1))

static int ip6ip6_fb_tnl_dev_init(struct net_device *dev);
static int ip6ip6_tnl_dev_init(struct net_device *dev);
static void ip6ip6_tnl_dev_setup(struct net_device *dev);

/* the IPv6 tunnel fallback device */
static struct net_device *ip6ip6_fb_tnl_dev;


/* lists for storing tunnels in use */
static struct ip6_tnl *tnls_r_l[HASH_SIZE];
static struct ip6_tnl *tnls_wc[1];
static struct ip6_tnl **tnls[2] = { tnls_wc, tnls_r_l };

/* lock for the tunnel lists */
static DEFINE_RWLOCK(ip6ip6_lock);

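/*
 * Per-tunnel destination cache: ip6_tnl_dst_check() revalidates the
 * cached route against the cookie saved when it was stored and drops
 * it if the routing tree has changed since, ip6_tnl_dst_reset()
 * invalidates the cache, and ip6_tnl_dst_store() caches a freshly
 * looked-up route together with its fib6 serial number.
 */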
static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
{
	struct dst_entry *dst = t->dst_cache;

	if (dst && dst->obsolete &&
	    dst->ops->check(dst, t->dst_cookie) == NULL) {
		t->dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}

static inline void ip6_tnl_dst_reset(struct ip6_tnl *t)
{
	dst_release(t->dst_cache);
	t->dst_cache = NULL;
}

static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;
	t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
	dst_release(t->dst_cache);
	t->dst_cache = dst;
}

/**
 * ip6ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

static struct ip6_tnl *
ip6ip6_tnl_lookup(struct in6_addr *remote, struct in6_addr *local)
{
	unsigned h0 = HASH(remote);
	unsigned h1 = HASH(local);
	struct ip6_tnl *t;

	for (t = tnls_r_l[h0 ^ h1]; t; t = t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	if ((t = tnls_wc[0]) != NULL && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}

/**
 * ip6ip6_bucket - get head of list matching given tunnel parameters
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6ip6_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl **
ip6ip6_bucket(struct ip6_tnl_parm *p)
{
	struct in6_addr *remote = &p->raddr;
	struct in6_addr *local = &p->laddr;
	unsigned h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote) ^ HASH(local);
	}
	return &tnls[prio][h];
}

/**
 * ip6ip6_tnl_link - add tunnel to hash table
 *   @t: tunnel to be added
 **/

static void
ip6ip6_tnl_link(struct ip6_tnl *t)
{
	struct ip6_tnl **tp = ip6ip6_bucket(&t->parms);

	t->next = *tp;
	write_lock_bh(&ip6ip6_lock);
	*tp = t;
	write_unlock_bh(&ip6ip6_lock);
}

/**
 * ip6ip6_tnl_unlink - remove tunnel from hash table
 *   @t: tunnel to be removed
 **/

static void
ip6ip6_tnl_unlink(struct ip6_tnl *t)
{
	struct ip6_tnl **tp;

	for (tp = ip6ip6_bucket(&t->parms); *tp; tp = &(*tp)->next) {
		if (t == *tp) {
			write_lock_bh(&ip6ip6_lock);
			*tp = t->next;
			write_unlock_bh(&ip6ip6_lock);
			break;
		}
	}
}

/**
 * ip6_tnl_create() - create a new tunnel
 *   @p: tunnel parameters
 *   @pt: pointer to new tunnel
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   0 on success
 **/

static int
ip6_tnl_create(struct ip6_tnl_parm *p, struct ip6_tnl **pt)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err;

	if (p->name[0]) {
		strlcpy(name, p->name, IFNAMSIZ);
	} else {
		int i;
		for (i = 1; i < IP6_TNL_MAX; i++) {
			sprintf(name, "ip6tnl%d", i);
			if (__dev_get_by_name(name) == NULL)
				break;
		}
		if (i == IP6_TNL_MAX)
			return -ENOBUFS;
	}
	dev = alloc_netdev(sizeof (*t), name, ip6ip6_tnl_dev_setup);
	if (dev == NULL)
		return -ENOMEM;

	t = dev->priv;
	dev->init = ip6ip6_tnl_dev_init;
	t->parms = *p;

	if ((err = register_netdevice(dev)) < 0) {
		free_netdev(dev);
		return err;
	}
	dev_hold(dev);

	ip6ip6_tnl_link(t);
	*pt = t;
	return 0;
}

/**
 * ip6ip6_tnl_locate - find or create tunnel matching given parameters
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set, a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   0 if tunnel located or created,
 *   -EINVAL if parameters incorrect,
 *   -ENODEV if no matching tunnel available
 **/

static int
ip6ip6_tnl_locate(struct ip6_tnl_parm *p, struct ip6_tnl **pt, int create)
{
	struct in6_addr *remote = &p->raddr;
	struct in6_addr *local = &p->laddr;
	struct ip6_tnl *t;

	if (p->proto != IPPROTO_IPV6)
		return -EINVAL;

	for (t = *ip6ip6_bucket(p); t; t = t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			*pt = t;
			return (create ? -EEXIST : 0);
		}
	}
	if (!create)
		return -ENODEV;

	return ip6_tnl_create(p, pt);
}

/**
 * ip6ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = dev->priv;

	if (dev == ip6ip6_fb_tnl_dev) {
		write_lock_bh(&ip6ip6_lock);
		tnls_wc[0] = NULL;
		write_unlock_bh(&ip6ip6_lock);
	} else {
		ip6ip6_tnl_unlink(t);
	}
	ip6_tnl_dst_reset(t);
	dev_put(dev);
}

/**
 * parse_tlv_tnl_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *   @raw: start of the IPv6 header within @skb's data
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/

static __u16
parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
{
	struct ipv6hdr *ipv6h = (struct ipv6hdr *) raw;
	__u8 nexthdr = ipv6h->nexthdr;
	__u16 off = sizeof (*ipv6h);

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		__u16 optlen = 0;
		struct ipv6_opt_hdr *hdr;
		if (raw + off + sizeof (*hdr) > skb->data &&
		    !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *) (raw + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		if (nexthdr == NEXTHDR_DEST) {
			__u16 i = off + 2;
			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof (*tel) > off + optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = hdr->nexthdr;
		off += optlen;
	}
	return 0;
}

/**
 * ip6ip6_err - tunnel error handler
 *
 * Description:
 *   ip6ip6_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 **/

static void
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   int type, int code, int offset, __u32 info)
{
	struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
	struct ip6_tnl *t;
	int rel_msg = 0;
	int rel_type = ICMPV6_DEST_UNREACH;
	int rel_code = ICMPV6_ADDR_UNREACH;
	__u32 rel_info = 0;
	__u16 len;

	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */

	read_lock(&ip6ip6_lock);
	if ((t = ip6ip6_tnl_lookup(&ipv6h->daddr, &ipv6h->saddr)) == NULL)
		goto out;

	switch (type) {
		__u32 teli;
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu;
	case ICMPV6_DEST_UNREACH:
		if (net_ratelimit())
			printk(KERN_WARNING
			       "%s: Path to destination invalid "
			       "or inactive!\n", t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if (code == ICMPV6_EXC_HOPLIMIT) {
			if (net_ratelimit())
				printk(KERN_WARNING
				       "%s: Too small hop limit or "
				       "routing loop in tunnel!\n",
				       t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		/* ignore if parameter problem not caused by a tunnel
		   encapsulation limit sub-option */
		if (code != ICMPV6_HDR_FIELD) {
			break;
		}
		teli = parse_tlv_tnl_enc_lim(skb, skb->data);

		if (teli && teli == ntohl(info) - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				if (net_ratelimit())
					printk(KERN_WARNING
					       "%s: Too small encapsulation "
					       "limit or routing loop in "
					       "tunnel!\n", t->parms.name);
				rel_msg = 1;
			}
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		mtu = ntohl(info) - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		t->dev->mtu = mtu;

		if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	}
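	/*
	 * Relay the error towards the original sender: clone the skb,
	 * strip the outer headers so the inner IPv6 header comes first,
	 * guess the incoming interface from a route lookup on the inner
	 * source address and send the translated ICMPv6 message.
	 */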
	if (rel_msg && pskb_may_pull(skb, offset + sizeof (*ipv6h))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out;

		dst_release(skb2->dst);
		skb2->dst = NULL;
		skb_pull(skb2, offset);
		skb2->nh.raw = skb2->data;

		/* Try to guess incoming interface */
		rt = rt6_lookup(&skb2->nh.ipv6h->saddr, NULL, 0, 0);

		if (rt && rt->rt6i_dev)
			skb2->dev = rt->rt6i_dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev);

		if (rt)
			dst_release(&rt->u.dst);

		kfree_skb(skb2);
	}
out:
	read_unlock(&ip6ip6_lock);
}

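/*
 * On decapsulation, propagate a Congestion Experienced (CE) mark from
 * the outer IPv6 header to the inner one, so that ECN information is
 * not lost across the tunnel.
 */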
static inline void ip6ip6_ecn_decapsulate(struct ipv6hdr *outer_iph,
					  struct sk_buff *skb)
{
	struct ipv6hdr *inner_iph = skb->nh.ipv6h;

	if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
		IP6_ECN_set_ce(inner_iph);
}

/**
 * ip6ip6_rcv - decapsulate IPv6 packet and retransmit it locally
 *   @pskb: pointer to the received socket buffer
 *   @nhoffp: network header offset pointer (unused here)
 *
 * Return: 0
 **/

static int
ip6ip6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
{
	struct sk_buff *skb = *pskb;
	struct ipv6hdr *ipv6h;
	struct ip6_tnl *t;

	if (!pskb_may_pull(skb, sizeof (*ipv6h)))
		goto discard;

	ipv6h = skb->nh.ipv6h;

	read_lock(&ip6ip6_lock);

	if ((t = ip6ip6_tnl_lookup(&ipv6h->saddr, &ipv6h->daddr)) != NULL) {
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			read_unlock(&ip6ip6_lock);
			kfree_skb(skb);
			return 0;
		}

		if (!(t->parms.flags & IP6_TNL_F_CAP_RCV)) {
			t->stat.rx_dropped++;
			read_unlock(&ip6ip6_lock);
			goto discard;
		}
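		/*
		 * Decapsulate: make the inner IPv6 header the network
		 * header, drop the security path and dst inherited from
		 * the outer packet, and feed the packet back to the
		 * stack as if it had arrived on the tunnel device.
		 */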
		secpath_reset(skb);
		skb->mac.raw = skb->nh.raw;
		skb->nh.raw = skb->data;
		skb->protocol = htons(ETH_P_IPV6);
		skb->pkt_type = PACKET_HOST;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		skb->dev = t->dev;
		dst_release(skb->dst);
		skb->dst = NULL;
		if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
			ipv6_copy_dscp(ipv6h, skb->nh.ipv6h);
		ip6ip6_ecn_decapsulate(ipv6h, skb);
		t->stat.rx_packets++;
		t->stat.rx_bytes += skb->len;
		netif_rx(skb);
		read_unlock(&ip6ip6_lock);
		return 0;
	}
	read_unlock(&ip6ip6_lock);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
discard:
	return 1;
}

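/*
 * create_tel() builds the IPv6 destination options header carrying the
 * RFC 2473 tunnel encapsulation limit for outgoing packets: an
 * ipv6_txoptions block followed by an 8-byte dst0opt that holds the
 * encapsulation limit TLV and a PadN filler.
 */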
static inline struct ipv6_txoptions *create_tel(__u8 encap_limit)
{
	struct ipv6_tlv_tnl_enc_lim *tel;
	struct ipv6_txoptions *opt;
	__u8 *raw;

	int opt_len = sizeof(*opt) + 8;

	if (!(opt = kmalloc(opt_len, GFP_ATOMIC))) {
		return NULL;
	}
	memset(opt, 0, opt_len);
	opt->tot_len = opt_len;
	opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1);
	opt->opt_nflen = 8;

	tel = (struct ipv6_tlv_tnl_enc_lim *) (opt->dst0opt + 1);
	tel->type = IPV6_TLV_TNL_ENCAP_LIMIT;
	tel->length = 1;
	tel->encap_limit = encap_limit;

	raw = (__u8 *) opt->dst0opt;
	raw[5] = IPV6_TLV_PADN;
	raw[6] = 1;

	return opt;
}

/**
 * ip6ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline int
ip6ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

/**
 * ip6ip6_tnl_xmit - encapsulate packet and send
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
 *   it.
 *
 * Return:
 *   0
 **/

static int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = (struct ip6_tnl *) dev->priv;
	struct net_device_stats *stats = &t->stat;
	struct ipv6hdr *ipv6h = skb->nh.ipv6h;
	struct ipv6_txoptions *opt = NULL;
	int encap_limit = -1;
	__u16 offset;
	struct flowi fl;
	struct dst_entry *dst;
	struct net_device *tdev;
	int mtu;
	int max_headroom = sizeof(struct ipv6hdr);
	u8 proto;
	int err;
	int pkt_len;
	int dsfield;

	if (t->recursion++) {
		stats->collisions++;
		goto tx_err;
	}
	if (skb->protocol != htons(ETH_P_IPV6) ||
	    !(t->parms.flags & IP6_TNL_F_CAP_XMIT) ||
	    ip6ip6_tnl_addr_conflict(t, ipv6h)) {
		goto tx_err;
	}
	if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;
		tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2, skb->dev);
			goto tx_err;
		}
		encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
		encap_limit = t->parms.encap_limit;
	}
	memcpy(&fl, &t->fl, sizeof (fl));
	proto = fl.proto;

	dsfield = ipv6_get_dsfield(ipv6h);
	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
		fl.fl6_flowlabel |= (*(__u32 *) ipv6h & IPV6_TCLASS_MASK);
	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl.fl6_flowlabel |= (*(__u32 *) ipv6h & IPV6_FLOWLABEL_MASK);

	if (encap_limit >= 0 && (opt = create_tel(encap_limit)) == NULL)
		goto tx_err;

	if ((dst = ip6_tnl_dst_check(t)) != NULL)
		dst_hold(dst);
	else
		dst = ip6_route_output(NULL, &fl);

	if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0) < 0)
		goto tx_err_link_failure;

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		if (net_ratelimit())
			printk(KERN_WARNING
			       "%s: Local routing loop detected!\n",
			       t->parms.name);
		goto tx_err_dst_release;
	}
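	/*
	 * Compute the path MTU seen by the inner packet: the route MTU
	 * minus the outer IPv6 header, minus another 8 bytes when an
	 * encapsulation limit option will be added, clamped to
	 * IPV6_MIN_MTU.  Oversized packets trigger a "packet too big"
	 * error back to the sender.
	 */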
	mtu = dst_mtu(dst) - sizeof (*ipv6h);
	if (opt) {
		max_headroom += 8;
		mtu -= 8;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb->dst && mtu < dst_mtu(skb->dst)) {
		struct rt6_info *rt = (struct rt6_info *) skb->dst;
		rt->rt6i_flags |= RTF_MODIFIED;
		rt->u.dst.metrics[RTAX_MTU-1] = mtu;
	}
	if (skb->len > mtu) {
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
		goto tx_err_dst_release;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom ||
	    skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb;

		if (!(new_skb = skb_realloc_headroom(skb, max_headroom)))
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		kfree_skb(skb);
		skb = new_skb;
	}
	dst_release(skb->dst);
	skb->dst = dst_clone(dst);

	skb->h.raw = skb->nh.raw;

	if (opt)
		ipv6_push_nfrag_opts(skb, opt, &proto, NULL);

	skb->nh.raw = skb_push(skb, sizeof(struct ipv6hdr));
	ipv6h = skb->nh.ipv6h;
	*(u32*)ipv6h = fl.fl6_flowlabel | htonl(0x60000000);
	dsfield = INET_ECN_encapsulate(0, dsfield);
	ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
	ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = proto;
	ipv6_addr_copy(&ipv6h->saddr, &fl.fl6_src);
	ipv6_addr_copy(&ipv6h->daddr, &fl.fl6_dst);
	nf_reset(skb);
	pkt_len = skb->len;
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);

	if (err == NET_XMIT_SUCCESS || err == NET_XMIT_CN) {
		stats->tx_bytes += pkt_len;
		stats->tx_packets++;
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}
	ip6_tnl_dst_store(t, dst);

	if (opt)
		kfree(opt);

	t->recursion--;
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	if (opt)
		kfree(opt);
tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	t->recursion--;
	return 0;
}

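/*
 * ip6_tnl_set_cap() determines whether the tunnel is allowed to
 * transmit and/or receive.  IP6_TNL_F_CAP_XMIT and IP6_TNL_F_CAP_RCV
 * are set only for plain unicast end-points: the local address must be
 * assigned to this host (on the bound link device, if any) and the
 * remote address must not be an address of this host.
 */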
static void ip6_tnl_set_cap(struct ip6_tnl *t)
{
	struct ip6_tnl_parm *p = &t->parms;
	struct in6_addr *laddr = &p->laddr;
	struct in6_addr *raddr = &p->raddr;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV);

	if (ltype != IPV6_ADDR_ANY && rtype != IPV6_ADDR_ANY &&
	    ((ltype|rtype) &
	     (IPV6_ADDR_UNICAST|
	      IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL|
	      IPV6_ADDR_MAPPED|IPV6_ADDR_RESERVED)) == IPV6_ADDR_UNICAST) {
		struct net_device *ldev = NULL;
		int l_ok = 1;
		int r_ok = 1;

		if (p->link)
			ldev = dev_get_by_index(p->link);

		if (ltype&IPV6_ADDR_UNICAST && !ipv6_chk_addr(laddr, ldev, 0))
			l_ok = 0;

		if (rtype&IPV6_ADDR_UNICAST && ipv6_chk_addr(raddr, NULL, 0))
			r_ok = 0;

		if (l_ok && r_ok) {
			if (ltype&IPV6_ADDR_UNICAST)
				p->flags |= IP6_TNL_F_CAP_XMIT;
			if (rtype&IPV6_ADDR_UNICAST)
				p->flags |= IP6_TNL_F_CAP_RCV;
		}
		if (ldev)
			dev_put(ldev);
	}
}

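/*
 * ip6ip6_tnl_link_config() derives the device configuration from the
 * tunnel parameters: device and broadcast addresses, the flowi
 * template used to route encapsulated packets, the point-to-point
 * flag, and the MTU and hard_header_len based on a route lookup
 * towards the remote end-point.
 */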
static void ip6ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct ip6_tnl_parm *p = &t->parms;
	struct flowi *fl = &t->fl;

	memcpy(&dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(&dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	ipv6_addr_copy(&fl->fl6_src, &p->laddr);
	ipv6_addr_copy(&fl->fl6_dst, &p->raddr);
	fl->oif = p->link;
	fl->fl6_flowlabel = 0;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl->fl6_flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl->fl6_flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	ip6_tnl_set_cap(t);

	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	dev->iflink = p->link;

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		struct rt6_info *rt = rt6_lookup(&p->raddr, &p->laddr,
						 p->link, 0);

		if (rt == NULL)
			return;

		if (rt->rt6i_dev) {
			dev->hard_header_len = rt->rt6i_dev->hard_header_len +
				sizeof (struct ipv6hdr);

			dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		dst_release(&rt->u.dst);
	}
}

/**
 * ip6ip6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   ip6ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
{
	ipv6_addr_copy(&t->parms.laddr, &p->laddr);
	ipv6_addr_copy(&t->parms.raddr, &p->raddr);
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	ip6ip6_tnl_link_config(t);
	return 0;
}

/**
 * ip6ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   ip6ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *   %SIOCGETTUNNEL: get tunnel parameters for device
 *   %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *   %SIOCCHGTUNNEL: change tunnel parameters to those given
 *   %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process doesn't have %CAP_NET_ADMIN set,
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
 *   %-ENODEV if attempting to change or delete a nonexisting device
 **/

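/*
 * Typical userspace usage goes through iproute2 (assuming a build with
 * IPv6 tunnel support; exact syntax may vary), e.g.:
 *
 *   ip -6 tunnel add ip6tnl1 mode ip6ip6 \
 *           local 2001:db8::1 remote 2001:db8::2
 *   ip link set ip6tnl1 up
 *
 * which reaches this handler as a SIOCADDTUNNEL ioctl on the fallback
 * device "ip6tnl0".
 */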
static int
ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	int create;
	struct ip6_tnl_parm p;
	struct ip6_tnl *t = NULL;

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6ip6_fb_tnl_dev) {
			if (copy_from_user(&p,
					   ifr->ifr_ifru.ifru_data,
					   sizeof (p))) {
				err = -EFAULT;
				break;
			}
			if ((err = ip6ip6_tnl_locate(&p, &t, 0)) == -ENODEV)
				t = (struct ip6_tnl *) dev->priv;
			else if (err)
				break;
		} else
			t = (struct ip6_tnl *) dev->priv;

		memcpy(&p, &t->parms, sizeof (p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
			err = -EFAULT;
		}
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		create = (cmd == SIOCADDTUNNEL);
		if (!capable(CAP_NET_ADMIN))
			break;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
			err = -EFAULT;
			break;
		}
		if (!create && dev != ip6ip6_fb_tnl_dev) {
			t = (struct ip6_tnl *) dev->priv;
		}
		if (!t && (err = ip6ip6_tnl_locate(&p, &t, create))) {
			break;
		}
		if (cmd == SIOCCHGTUNNEL) {
			if (t->dev != dev) {
				err = -EEXIST;
				break;
			}
			ip6ip6_tnl_unlink(t);
			err = ip6ip6_tnl_change(t, &p);
			ip6ip6_tnl_link(t);
			netdev_state_change(dev);
		}
		if (copy_to_user(ifr->ifr_ifru.ifru_data,
				 &t->parms, sizeof (p))) {
			err = -EFAULT;
		} else {
			err = 0;
		}
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;

		if (dev == ip6ip6_fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
					   sizeof (p))) {
				err = -EFAULT;
				break;
			}
			err = ip6ip6_tnl_locate(&p, &t, 0);
			if (err)
				break;
			if (t == ip6ip6_fb_tnl_dev->priv) {
				err = -EPERM;
				break;
			}
		} else {
			t = (struct ip6_tnl *) dev->priv;
		}
		err = unregister_netdevice(t->dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}

/**
 * ip6ip6_tnl_get_stats - return the stats for tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Return: stats for device
 **/

static struct net_device_stats *
ip6ip6_tnl_get_stats(struct net_device *dev)
{
	return &(((struct ip6_tnl *) dev->priv)->stat);
}

/**
 * ip6ip6_tnl_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/

static int
ip6ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < IPV6_MIN_MTU) {
		return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}

/**
 * ip6ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6ip6_tnl_dev_setup(struct net_device *dev)
{
	SET_MODULE_OWNER(dev);
	dev->uninit = ip6ip6_tnl_dev_uninit;
	dev->destructor = free_netdev;
	dev->hard_start_xmit = ip6ip6_tnl_xmit;
	dev->get_stats = ip6ip6_tnl_get_stats;
	dev->do_ioctl = ip6ip6_tnl_ioctl;
	dev->change_mtu = ip6ip6_tnl_change_mtu;

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
	dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
}


/**
 * ip6ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static inline void
ip6ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = (struct ip6_tnl *) dev->priv;
	t->fl.proto = IPPROTO_IPV6;
	t->dev = dev;
	strcpy(t->parms.name, dev->name);
}

/**
 * ip6ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static int
ip6ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = (struct ip6_tnl *) dev->priv;
	ip6ip6_tnl_dev_init_gen(dev);
	ip6ip6_tnl_link_config(t);
	return 0;
}

/**
 * ip6ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/

static int
ip6ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = dev->priv;
	ip6ip6_tnl_dev_init_gen(dev);
	dev_hold(dev);
	tnls_wc[0] = t;
	return 0;
}

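/*
 * Receive-side hookup: registering this xfrm6_tunnel handler makes
 * ip6ip6_rcv() handle incoming IPv6-in-IPv6 (next header IPPROTO_IPV6)
 * packets and ip6ip6_err() the related ICMPv6 errors.
 */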
static struct xfrm6_tunnel ip6ip6_handler = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int  err;

	if (xfrm6_tunnel_register(&ip6ip6_handler) < 0) {
		printk(KERN_ERR "ip6ip6 init: can't register tunnel\n");
		return -EAGAIN;
	}
	ip6ip6_fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					 ip6ip6_tnl_dev_setup);

	if (!ip6ip6_fb_tnl_dev) {
		err = -ENOMEM;
		goto fail;
	}
	ip6ip6_fb_tnl_dev->init = ip6ip6_fb_tnl_dev_init;

	if ((err = register_netdev(ip6ip6_fb_tnl_dev))) {
		free_netdev(ip6ip6_fb_tnl_dev);
		goto fail;
	}
	return 0;
fail:
	xfrm6_tunnel_deregister(&ip6ip6_handler);
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	if (xfrm6_tunnel_deregister(&ip6ip6_handler) < 0)
		printk(KERN_INFO "ip6ip6 close: can't deregister tunnel\n");

	unregister_netdev(ip6ip6_fb_tnl_dev);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);