/*
 *	Internet Control Message Protocol (ICMPv6)
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
 *
 *	Based on net/ipv4/icmp.c
 *
 *	RFC 1885
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Changes:
 *
 *	Andi Kleen		:	exception handling
 *	Andi Kleen			add rate limits. never reply to an icmp.
 *					add more length checks and other fixes.
 *	yoshfuji		:	ensure to send parameter problem for
 *					fragments.
 *	YOSHIFUJI Hideaki @USAGI:	added sysctl for icmp rate limit.
 *	Randy Dunlap and
 *	YOSHIFUJI Hideaki @USAGI:	Per-interface statistics support
 *	Kazunori MIYAZAWA @USAGI:	change output process to use ip6_append_data
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/init.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/icmpv6.h>

#include <net/ip.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/protocol.h>
#include <net/raw.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/icmp.h>

#include <asm/uaccess.h>
#include <asm/system.h>

DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);

/*
 *	The ICMP socket(s). This is the most convenient way to flow control
 *	our ICMP output as well as maintain a clean interface throughout
 *	all layers. All Socketless IP sends will soon be gone.
 *
 *	On SMP we have one ICMP socket per-cpu.
 */
static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL;
#define icmpv6_socket	__get_cpu_var(__icmpv6_socket)

static int icmpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp);

static struct inet6_protocol icmpv6_protocol = {
	.handler	=	icmpv6_rcv,
	.flags		=	INET6_PROTO_FINAL,
};

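/*
 * Serialize transmission on the per-CPU ICMPv6 socket.  icmpv6_xmit_lock()
 * returns non-zero if the socket lock is already held, which happens when
 * the output path re-enters ICMPv6 (see the comment inside the function).
 */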
static __inline__ int icmpv6_xmit_lock(void)
{
	local_bh_disable();

	if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock))) {
		/* This can happen if the output path (f.e. SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
		local_bh_enable();
		return 1;
	}
	return 0;
}

static __inline__ void icmpv6_xmit_unlock(void)
{
	spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock);
}

/*
 * Slightly more convenient version of icmpv6_send.
 */
void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
{
	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
	kfree_skb(skb);
}

/*
 *	Figure out whether we may reply to this packet with an ICMP error.
 *
 *	We do not reply if:
 *	- it was an ICMP error message.
 *	- it is truncated, so that it is known that the protocol is ICMPv6
 *	  (i.e. it is cut in the middle of some exthdr).
 *
 *	--ANK (980726)
 */

static int is_ineligible(struct sk_buff *skb)
{
	int ptr = (u8 *)(skb->nh.ipv6h + 1) - skb->data;
	int len = skb->len - ptr;
	__u8 nexthdr = skb->nh.ipv6h->nexthdr;

	if (len < 0)
		return 1;

	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
	if (ptr < 0)
		return 0;
	if (nexthdr == IPPROTO_ICMPV6) {
		u8 _type, *tp;
		tp = skb_header_pointer(skb,
			ptr + offsetof(struct icmp6hdr, icmp6_type),
			sizeof(_type), &_type);
		if (tp == NULL ||
		    !(*tp & ICMPV6_INFOMSG_MASK))
			return 1;
	}
	return 0;
}

static int sysctl_icmpv6_time = 1*HZ;

/*
 *	Check the ICMP output rate limit
 */
static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
				     struct flowi *fl)
{
	struct dst_entry *dst;
	int res = 0;

	/* Informational messages are not limited. */
	if (type & ICMPV6_INFOMSG_MASK)
		return 1;

	/* Do not limit pmtu discovery, it would break it. */
	if (type == ICMPV6_PKT_TOOBIG)
		return 1;

	/*
	 * Look up the output route.
	 * XXX: perhaps the expire for routing entries cloned by
	 * this lookup should be more aggressive (not longer than timeout).
	 */
	dst = ip6_route_output(sk, fl);
	if (dst->error) {
		IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
		res = 1;
	} else {
		struct rt6_info *rt = (struct rt6_info *)dst;
		int tmo = sysctl_icmpv6_time;

		/* Give more bandwidth to wider prefixes. */
		if (rt->rt6i_dst.plen < 128)
			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);

		res = xrlim_allow(dst, tmo);
	}
	dst_release(dst);
	return res;
}

/*
 *	An inline helper for the "simple" if statement below:
 *	checks whether the parameter problem report was caused by an
 *	unrecognized IPv6 option whose Option Type has its two
 *	highest-order bits set to 10.
 */

static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
{
	u8 _optval, *op;

	offset += skb->nh.raw - skb->data;
	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
	if (op == NULL)
		return 1;
	return (*op & 0xC0) == 0x80;
}

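/*
 * Copy the ICMPv6 header into the queued data, checksum the queued
 * fragments and hand the result to ip6_push_pending_frames().
 */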
static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
{
	struct sk_buff *skb;
	struct icmp6hdr *icmp6h;
	int err = 0;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	icmp6h = (struct icmp6hdr *) skb->h.raw;
	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
	icmp6h->icmp6_cksum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		skb->csum = csum_partial((char *)icmp6h,
					 sizeof(struct icmp6hdr), skb->csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
						      &fl->fl6_dst,
						      len, fl->proto,
						      skb->csum);
	} else {
		u32 tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);
		}

		tmp_csum = csum_partial((char *)icmp6h,
					sizeof(struct icmp6hdr), tmp_csum);
		tmp_csum = csum_ipv6_magic(&fl->fl6_src,
					   &fl->fl6_dst,
					   len, fl->proto, tmp_csum);
		icmp6h->icmp6_cksum = tmp_csum;
	}
	if (icmp6h->icmp6_cksum == 0)
		icmp6h->icmp6_cksum = -1;
	ip6_push_pending_frames(sk);
out:
	return err;
}

struct icmpv6_msg {
	struct sk_buff	*skb;
	int		offset;
};

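/*
 * getfrag callback for ip6_append_data(): copy a chunk of the original
 * packet and fold its checksum into the new skb's csum.
 */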
static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
	struct sk_buff *org_skb = msg->skb;
	__u32 csum = 0;

	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
				      to, len, csum);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Send an ICMP message in response to a packet in error
 */
void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
		 struct net_device *dev)
{
	struct inet6_dev *idev = NULL;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct sock *sk;
	struct ipv6_pinfo *np;
	struct in6_addr *saddr = NULL;
	struct dst_entry *dst;
	struct icmp6hdr tmp_hdr;
	struct flowi fl;
	struct icmpv6_msg msg;
	int iif = 0;
	int addr_type = 0;
	int len;
	int hlimit;
	int err = 0;

	if ((u8 *)hdr < skb->head || (u8 *)(hdr + 1) > skb->tail)
		return;

	/*
	 *	Make sure we respect the rules
	 *	i.e. RFC 1885 2.4(e)
	 *	Rule (e.1) is enforced by not using icmpv6_send
	 *	in any code that processes icmp errors.
	 */
	addr_type = ipv6_addr_type(&hdr->daddr);

	if (ipv6_chk_addr(&hdr->daddr, skb->dev, 0))
		saddr = &hdr->daddr;

	/*
	 *	Dest addr check
	 */

	if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
		if (type != ICMPV6_PKT_TOOBIG &&
		    !(type == ICMPV6_PARAMPROB &&
		      code == ICMPV6_UNK_OPTION &&
		      (opt_unrec(skb, info))))
			return;

		saddr = NULL;
	}

	addr_type = ipv6_addr_type(&hdr->saddr);

	/*
	 *	Source addr check
	 */

	if (addr_type & IPV6_ADDR_LINKLOCAL)
		iif = skb->dev->ifindex;

	/*
	 *	We must also not send if the source is an Anycast address;
	 *	for now we cannot detect that.
	 */
	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
		LIMIT_NETDEBUG(
			printk(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n"));
		return;
	}

	/*
	 *	Never answer an ICMP error packet.
	 */
	if (is_ineligible(skb)) {
		LIMIT_NETDEBUG(
			printk(KERN_DEBUG "icmpv6_send: no reply to icmp error\n"));
		return;
	}

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_ICMPV6;
	ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
	if (saddr)
		ipv6_addr_copy(&fl.fl6_src, saddr);
	fl.oif = iif;
	fl.fl_icmp_type = type;
	fl.fl_icmp_code = code;

	if (icmpv6_xmit_lock())
		return;

	sk = icmpv6_socket->sk;
	np = inet6_sk(sk);

	if (!icmpv6_xrlim_allow(sk, type, &fl))
		goto out;

	tmp_hdr.icmp6_type = type;
	tmp_hdr.icmp6_code = code;
	tmp_hdr.icmp6_cksum = 0;
	tmp_hdr.icmp6_pointer = htonl(info);

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto out;
	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto out_dst_release;

	if (ipv6_addr_is_multicast(&fl.fl6_dst))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	msg.skb = skb;
	msg.offset = skb->nh.raw - skb->data;

	len = skb->len - msg.offset;
	if (len < 0) {
		LIMIT_NETDEBUG(
			printk(KERN_DEBUG "icmp: len problem\n"));
		goto out_dst_release;
	}
	len = min_t(unsigned int, len,
		    IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));

	idev = in6_dev_get(skb->dev);

	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
			      len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr),
			      hlimit, NULL, &fl, (struct rt6_info *)dst,
			      MSG_DONTWAIT);
	if (err) {
		ip6_flush_pending_frames(sk);
		goto out_put;
	}
	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));

	if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_OUTDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);

out_put:
	if (likely(idev != NULL))
		in6_dev_put(idev);
out_dst_release:
	dst_release(dst);
out:
	icmpv6_xmit_unlock();
}

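/*
 * Answer an ICMPv6 echo request with an echo reply.
 */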
static void icmpv6_echo_reply(struct sk_buff *skb)
{
	struct sock *sk;
	struct inet6_dev *idev;
	struct ipv6_pinfo *np;
	struct in6_addr *saddr = NULL;
	struct icmp6hdr *icmph = (struct icmp6hdr *) skb->h.raw;
	struct icmp6hdr tmp_hdr;
	struct flowi fl;
	struct icmpv6_msg msg;
	struct dst_entry *dst;
	int err = 0;
	int hlimit;

	saddr = &skb->nh.ipv6h->daddr;

	if (!ipv6_unicast_destination(skb))
		saddr = NULL;

	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_ICMPV6;
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	if (saddr)
		ipv6_addr_copy(&fl.fl6_src, saddr);
	fl.oif = skb->dev->ifindex;
	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;

	if (icmpv6_xmit_lock())
		return;

	sk = icmpv6_socket->sk;
	np = inet6_sk(sk);

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto out;
	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto out_dst_release;

	if (ipv6_addr_is_multicast(&fl.fl6_dst))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	idev = in6_dev_get(skb->dev);

	msg.skb = skb;
	msg.offset = 0;

	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr), hlimit, NULL, &fl,
			      (struct rt6_info *)dst, MSG_DONTWAIT);

	if (err) {
		ip6_flush_pending_frames(sk);
		goto out_put;
	}
	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));

	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES);
	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);

out_put:
	if (likely(idev != NULL))
		in6_dev_put(idev);
out_dst_release:
	dst_release(dst);
out:
	icmpv6_xmit_unlock();
}

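/*
 * Pass an ICMPv6 error on to the upper-layer protocol's error handler
 * and to any matching raw sockets.
 */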
static void icmpv6_notify(struct sk_buff *skb, int type, int code, u32 info)
{
	struct in6_addr *saddr, *daddr;
	struct inet6_protocol *ipprot;
	struct sock *sk;
	int inner_offset;
	int hash;
	u8 nexthdr;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return;

	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
	if (ipv6_ext_hdr(nexthdr)) {
		/* now skip over extension headers */
		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
		if (inner_offset < 0)
			return;
	} else {
		inner_offset = sizeof(struct ipv6hdr);
	}

	/* Check the header, including 8 bytes of inner protocol header. */
	if (!pskb_may_pull(skb, inner_offset + 8))
		return;

	saddr = &skb->nh.ipv6h->saddr;
	daddr = &skb->nh.ipv6h->daddr;

	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
	   Without this we will not be able e.g. to make source-routed
	   pmtu discovery work.
	   The corresponding argument (opt) to notifiers is already added.
	   --ANK (980726)
	 */

	hash = nexthdr & (MAX_INET_PROTOS - 1);

	rcu_read_lock();
	ipprot = rcu_dereference(inet6_protos[hash]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
	rcu_read_unlock();

	read_lock(&raw_v6_lock);
	if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
		while ((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr,
					     skb->dev->ifindex))) {
			rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
			sk = sk_next(sk);
		}
	}
	read_unlock(&raw_v6_lock);
}

/*
 *	Handle icmp messages
 */

static int icmpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct inet6_dev *idev = __in6_dev_get(dev);
	struct in6_addr *saddr, *daddr;
	struct ipv6hdr *orig_hdr;
	struct icmp6hdr *hdr;
	int type;

	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);

	saddr = &skb->nh.ipv6h->saddr;
	daddr = &skb->nh.ipv6h->daddr;

	/* Perform checksum. */
	if (skb->ip_summed == CHECKSUM_HW) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		if (csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
				    skb->csum)) {
			LIMIT_NETDEBUG(
				printk(KERN_DEBUG "ICMPv6 hw checksum failed\n"));
			skb->ip_summed = CHECKSUM_NONE;
		}
	}
	if (skb->ip_summed == CHECKSUM_NONE) {
		if (csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
				    skb_checksum(skb, 0, skb->len, 0))) {
			LIMIT_NETDEBUG(
				printk(KERN_DEBUG "ICMPv6 checksum failed [%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x > %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x]\n",
				       NIP6(*saddr), NIP6(*daddr)));
			goto discard_it;
		}
	}

	if (!pskb_pull(skb, sizeof(struct icmp6hdr)))
		goto discard_it;

	hdr = (struct icmp6hdr *) skb->h.raw;

	type = hdr->icmp6_type;

	if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
	else if (type >= ICMPV6_ECHO_REQUEST && type <= NDISC_REDIRECT)
		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INECHOS, type - ICMPV6_ECHO_REQUEST);

	switch (type) {
	case ICMPV6_ECHO_REQUEST:
		icmpv6_echo_reply(skb);
		break;

	case ICMPV6_ECHO_REPLY:
		/* we couldn't care less */
		break;

	case ICMPV6_PKT_TOOBIG:
		/* BUGGG_FUTURE: if packet contains rthdr, we cannot update
		   standard destination cache. Seems, only "advanced"
		   destination cache will allow to solve this problem
		   --ANK (980726)
		 */
		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto discard_it;
		hdr = (struct icmp6hdr *) skb->h.raw;
		orig_hdr = (struct ipv6hdr *) (hdr + 1);
		rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
				   ntohl(hdr->icmp6_mtu));

		/*
		 *	Drop through to notify
		 */

	case ICMPV6_DEST_UNREACH:
	case ICMPV6_TIME_EXCEED:
	case ICMPV6_PARAMPROB:
		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
		break;

	case NDISC_ROUTER_SOLICITATION:
	case NDISC_ROUTER_ADVERTISEMENT:
	case NDISC_NEIGHBOUR_SOLICITATION:
	case NDISC_NEIGHBOUR_ADVERTISEMENT:
	case NDISC_REDIRECT:
		ndisc_rcv(skb);
		break;

	case ICMPV6_MGM_QUERY:
		igmp6_event_query(skb);
		break;

	case ICMPV6_MGM_REPORT:
		igmp6_event_report(skb);
		break;

	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_NI_QUERY:
	case ICMPV6_NI_REPLY:
	case ICMPV6_MLD2_REPORT:
	case ICMPV6_DHAAD_REQUEST:
	case ICMPV6_DHAAD_REPLY:
	case ICMPV6_MOBILE_PREFIX_SOL:
	case ICMPV6_MOBILE_PREFIX_ADV:
		break;

	default:
		LIMIT_NETDEBUG(
			printk(KERN_DEBUG "icmpv6: msg of unknown type\n"));

		/* informational */
		if (type & ICMPV6_INFOMSG_MASK)
			break;

		/*
		 * error of unknown type.
		 * must pass to upper level
		 */

		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
	}
	kfree_skb(skb);
	return 0;

discard_it:
	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
	kfree_skb(skb);
	return 0;
}

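/*
 * Create the per-CPU ICMPv6 control sockets and register the ICMPv6
 * protocol handler.
 */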
int __init icmpv6_init(struct net_proto_family *ops)
{
	struct sock *sk;
	int err, i, j;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;

		err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
				       &per_cpu(__icmpv6_socket, i));
		if (err < 0) {
			printk(KERN_ERR
			       "Failed to initialize the ICMP6 control socket "
			       "(err %d).\n",
			       err);
			goto fail;
		}

		sk = per_cpu(__icmpv6_socket, i)->sk;
		sk->sk_allocation = GFP_ATOMIC;

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff struct overhead.
		 */
		sk->sk_sndbuf =
			(2 * ((64 * 1024) + sizeof(struct sk_buff)));

		sk->sk_prot->unhash(sk);
	}

	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) {
		printk(KERN_ERR "Failed to register ICMP6 protocol\n");
		err = -EAGAIN;
		goto fail;
	}

	return 0;

 fail:
	for (j = 0; j < i; j++) {
		if (!cpu_possible(j))
			continue;
		sock_release(per_cpu(__icmpv6_socket, j));
	}

	return err;
}

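/*
 * Release the per-CPU control sockets and unregister the ICMPv6
 * protocol handler.
 */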
void icmpv6_cleanup(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		sock_release(per_cpu(__icmpv6_socket, i));
	}
	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
}

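/*
 * Mapping of ICMPV6_DEST_UNREACH codes to errno values and "fatal" flags,
 * indexed by code.
 */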
static struct icmp6_err {
	int err;
	int fatal;
} tab_unreach[] = {
	{	/* NOROUTE */
		.err	= ENETUNREACH,
		.fatal	= 0,
	},
	{	/* ADM_PROHIBITED */
		.err	= EACCES,
		.fatal	= 1,
	},
	{	/* Was NOT_NEIGHBOUR, now reserved */
		.err	= EHOSTUNREACH,
		.fatal	= 0,
	},
	{	/* ADDR_UNREACH */
		.err	= EHOSTUNREACH,
		.fatal	= 0,
	},
	{	/* PORT_UNREACH */
		.err	= ECONNREFUSED,
		.fatal	= 1,
	},
};

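/*
 * Convert an ICMPv6 type/code pair to an errno value; the return value
 * tells the caller whether the error is fatal.
 */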
int icmpv6_err_convert(int type, int code, int *err)
{
	int fatal = 0;

	*err = EPROTO;

	switch (type) {
	case ICMPV6_DEST_UNREACH:
		fatal = 1;
		if (code <= ICMPV6_PORT_UNREACH) {
			*err = tab_unreach[code].err;
			fatal = tab_unreach[code].fatal;
		}
		break;

	case ICMPV6_PKT_TOOBIG:
		*err = EMSGSIZE;
		break;

	case ICMPV6_PARAMPROB:
		*err = EPROTO;
		fatal = 1;
		break;

	case ICMPV6_TIME_EXCEED:
		*err = EHOSTUNREACH;
		break;
	}

	return fatal;
}

#ifdef CONFIG_SYSCTL
ctl_table ipv6_icmp_table[] = {
	{
		.ctl_name	= NET_IPV6_ICMP_RATELIMIT,
		.procname	= "ratelimit",
		.data		= &sysctl_icmpv6_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{ .ctl_name = 0 },
};
#endif