/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif

static struct sock *mroute_socket;


/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

static struct vif_device vif_table[MAXVIFS];	/* Devices		*/
static int maxvif;

#define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)

static int mroute_do_assert;			/* Set in PIM assert	*/
static int mroute_do_pim;

static struct mfc_cache *mfc_cache_array[MFC_LINES];	/* Forwarding cache	*/

static struct mfc_cache *mfc_unres_queue;	/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;	/* Size of unresolved	*/

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol;
#endif

static struct timer_list ipmr_expire_timer;

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

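/*
 *	Tear down the "dvmrp%d" IPIP tunnel that was created for a DVMRP vif:
 *	close the device, then ask the tunl0 fallback device to remove the
 *	tunnel via its SIOCDELTUNNEL ioctl (issued from kernel context, hence
 *	the temporary KERNEL_DS segment override).
 */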
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	dev_close(dev);

	dev = __dev_get_by_name(&init_net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

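/*
 *	Create a "dvmrp%d" IPIP tunnel for a VIFF_TUNNEL vif by driving the
 *	tunl0 fallback device's SIOCADDTUNNEL ioctl, then bring the new
 *	device up with rp_filter disabled and take a reference on it.
 *	Returns the tunnel device, or NULL on failure.
 */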
static
struct net_device *ipmr_new_tunnel(struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(&init_net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else
			err = -EOPNOTSUPP;

		dev = NULL;

		if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL)
				goto failure;

			ipv4_devconf_setall(in_dev);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}

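/*
 *	PIM-SM register interface ("pimreg"). Packets transmitted on this
 *	pseudo-device are not sent anywhere: they are bounced up to the
 *	user-space daemon as IGMPMSG_WHOLEPKT reports so that it can build
 *	PIM Register messages, and the skb is then freed.
 */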
#ifdef CONFIG_IP_PIMSM

static int reg_vif_num = -1;

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
}

static struct net_device *ipmr_reg_vif(void)
{
	struct net_device *dev;
	struct in_device *in_dev;

	dev = alloc_netdev(0, "pimreg", reg_vif_setup);

	if (dev == NULL)
		return NULL;

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	rcu_read_lock();
	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */

static int vif_delete(int vifi, int notify)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == reg_vif_num)
		reg_vif_num = -1;
#endif

	if (vifi+1 == maxvif) {
		int tmp;
		for (tmp=vifi-1; tmp>=0; tmp--) {
			if (VIF_EXISTS(tmp))
				break;
		}
		maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = NLMSG_DATA(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	kmem_cache_free(mrt_cachep, c);
}


/* Single timer process for all the unresolved queue. */

static void ipmr_expire_process(unsigned long dummy)
{
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, **cp;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (atomic_read(&cache_resolve_queue_len) == 0)
		goto out;

	now = jiffies;
	expires = 10*HZ;
	cp = &mfc_unres_queue;

	while ((c=*cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		*cp = c->next;

		ipmr_destroy_unres(c);
	}

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi=0; vifi<maxvif; vifi++) {
		if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}

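/*
 *	Add a virtual interface: resolve the underlying net_device according
 *	to vifc_flags (PIM register vif, DVMRP tunnel, or a plain device
 *	looked up by its local address), switch it to allmulti, then fill in
 *	the vif_table slot under mrt_lock.
 */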
static int vif_add(struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif();
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case 0:
		dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
		return -EADDRNOTAVAIL;
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	ip_rt_multicast_event(in_dev);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags&VIFF_REGISTER)
		reg_vif_num = vifi;
#endif
	if (vifi+1 > maxvif)
		maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

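/*
 *	Look up an (origin, group) pair in the resolved forwarding cache.
 *	Callers hold mrt_lock.
 */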
static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	for (c=mfc_cache_array[line]; c; c = c->next) {
		if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
			break;
	}
	return c;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10*HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = (skb_tail_pointer(skb) -
						  (u8 *)nlh);
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = NLMSG_DATA(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			ip_mr_forward(skb, c, 0);
	}
}

/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix ihl, length etc.
		   And all this only to mangle msg->im_msgtype and
		   to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{

	/*
	 *	Copy the IP header
	 */

	skb->network_header = skb->tail;
	skb_put(skb, ihl);
	skb_copy_to_linear_data(skb, pkt->data, ihl);
	ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
	msg = (struct igmpmsg *)skb_network_header(skb);
	msg->im_vif = vifi;
	skb->dst = dst_clone(pkt->dst);

	/*
	 *	Add our header
	 */

	igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	igmp->type =
	msg->im_msgtype = assert;
	igmp->code = 0;
	ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
	skb->transport_header = skb->network_header;
	}

	if (mroute_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to mrouted
	 */
	if ((ret = sock_queue_rcv_skb(mroute_socket, skb)) < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
{
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	for (c=mfc_unres_queue; c; c=c->next) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr)
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
		    (c=ipmr_cache_alloc_unres())==NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/*
		 *	Reflect first query at mrouted.
		 */
		if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE)) < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mfcctl *mfc)
{
	int line;
	struct mfc_cache *c, **cp;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}

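/*
 *	Add or update an (S,G) entry. If an entry already exists only its
 *	parent and oif thresholds are refreshed; otherwise a new cache entry
 *	is inserted and any packets queued on a matching unresolved entry
 *	are replayed through ipmr_cache_resolve().
 */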
static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
{
	int line;
	struct mfc_cache *uc, *c, **cp;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc_cache_array[line];
	mfc_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
	     cp = &uc->next) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			*cp = uc->next;
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ipmr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i=0; i<maxvif; i++) {
		if (!(vif_table[i].flags&VIFF_STATIC))
			vif_delete(i, 0);
	}

	/*
	 *	Wipe the cache
	 */
	for (i=0; i<MFC_LINES; i++) {
		struct mfc_cache *c, **cp;

		cp = &mfc_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags&MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

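/*
 *	Called when the mroute control socket goes away: clear the global
 *	socket pointer, drop the all-interfaces mc_forwarding count and
 *	flush the non-static vifs and cache entries.
 */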
static void mrtsock_destruct(struct sock *sk)
{
	rtnl_lock();
	if (sk == mroute_socket) {
		IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;

		write_lock_bh(&mrt_lock);
		mroute_socket = NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	}
	rtnl_unlock();
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
{
	int ret;
	struct vifctl vif;
	struct mfcctl mfc;

	if (optname != MRT_INIT) {
		if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_IGMP)
			return -EOPNOTSUPP;
		if (optlen != sizeof(int))
			return -ENOPROTOOPT;

		rtnl_lock();
		if (mroute_socket) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			write_lock_bh(&mrt_lock);
			mroute_socket = sk;
			write_unlock_bh(&mrt_lock);

			IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk != mroute_socket)
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(&vif, sk==mroute_socket);
		} else {
			ret = vif_delete(vif.vifc_vifi, 0);
		}
		rtnl_unlock();
		return ret;

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		if (optlen != sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT_DEL_MFC)
			ret = ipmr_mfc_delete(&mfc);
		else
			ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
		rtnl_unlock();
		return ret;
		/*
		 *	Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mroute_do_assert = (v) ? 1 : 0;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v;

		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = (v) ? 1 : 0;

		rtnl_lock();
		ret = 0;
		if (v != mroute_do_pim) {
			mroute_do_pim = v;
			mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
			if (mroute_do_pim)
				ret = inet_add_protocol(&pim_protocol,
							IPPROTO_PIM);
			else
				ret = inet_del_protocol(&pim_protocol,
							IPPROTO_PIM);
			if (ret < 0)
				ret = -EAGAIN;
#endif
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;

	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname != MRT_PIM &&
#endif
	    optname != MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (optname == MRT_VERSION)
		val = 0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mroute_do_pim;
#endif
	else
		val = mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &vif_table[vr.vifi];
		if (VIF_EXISTS(vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}


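/*
 *	Notifier for netdevice unregistration: drop any vifs that were
 *	bound to the disappearing device.
 */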
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct vif_device *v;
	int ct;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;
	v = &vif_table[0];
	for (ct=0; ct<maxvif; ct++,v++) {
		if (v->dev == dev)
			vif_delete(ct, 1);
	}
	return NOTIFY_DONE;
}


static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(iph, skb->dst, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}

/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	int encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
		kfree_skb(skb);
		return;
	}
#endif

	if (vif->flags&VIFF_TUNNEL) {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = vif->remote,
						.saddr = vif->local,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&init_net, &rt, &fl))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&init_net, &rt, &fl))
			goto out_free;
	}

	dev = rt->u.dst.dev;

	if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		   allow to send ICMP, so that packets will disappear
		   to blackhole.
		 */

		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	dst_release(skb->dst);
	skb->dst = &rt->u.dst;
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
	return;
}

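/*
 *	Map a net_device back to its vif index; returns -1 if the device
 *	is not a configured vif.
 */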
static int ipmr_find_vif(struct net_device *dev)
{
	int ct;
	for (ct=maxvif-1; ct>=0; ct--) {
		if (vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (vif_table[vif].dev != skb->dev) {
		int true_vifi;

		if (skb->rtable->fl.iif == 0) {
			/* It is our own packet, looped back.
			   Very complicated situation...

			   The best workaround until routing daemons will be
			   fixed is not to redistribute packet, if it was
			   send through wrong interface. It means, that
			   multicast applications WILL NOT work for
			   (S,G), which have default multicast route pointing
			   to wrong oif. In any case, it is not a good
			   idea to use multicasting applications on router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(skb->dev);

		if (true_vifi >= 0 && mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

	vif_table[vif].pkt_in++;
	vif_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ipmr_queue_xmit(skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2)
				ipmr_queue_xmit(skb2, cache, psend);
		} else {
			ipmr_queue_xmit(skb, cache, psend);
			return 0;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
	return 0;
}


/*
 *	Multicast packets for forwarding arrive here
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	int local = skb->rtable->rt_flags&RTCF_LOCAL;

	/* Packet is looped back after forward, it should not be
	   forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags&IPSKB_FORWARDED)
		goto dont_forward;

	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations sort of
			   Cisco IOS <= 11.2(8)) do not put router alert
			   option to IGMP packets destined to routable
			   groups. It is very bad, because it means
			   that we can forward NO IGMP messages.
			 */
			read_lock(&mrt_lock);
			if (mroute_socket) {
				nf_reset(skb);
				raw_rcv(mroute_socket, skb);
				read_unlock(&mrt_lock);
				return 0;
			}
			read_unlock(&mrt_lock);
		}
	}

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL) {
				read_unlock(&mrt_lock);
				return -ENOBUFS;
			}
			skb = skb2;
		}

		vif = ipmr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ipmr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip_mr_forward(skb, cache, local);

	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct iphdr *encap;
	struct net_device *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = igmp_hdr(skb);

	if (!mroute_do_pim ||
	    skb->len < sizeof(*pim) + sizeof(*encap) ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	encap = (struct iphdr *)(skb_transport_header(skb) +
				 sizeof(struct igmphdr));
	/*
	   Check that:
	   a. packet is really destined to a multicast group
	   b. packet is not a NULL-REGISTER
	   c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	skb->dst = NULL;
	reg_dev->stats.rx_bytes += skb->len;
	reg_dev->stats.rx_packets++;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
#endif

1550#ifdef CONFIG_IP_PIMSM_V2
1551static int pim_rcv(struct sk_buff * skb)
1552{
1553 struct pimreghdr *pim;
1554 struct iphdr *encap;
1555 struct net_device *reg_dev = NULL;
1556
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001557 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 goto drop;
1559
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001560 pim = (struct pimreghdr *)skb_transport_header(skb);
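	/*
	 * Only PIMv2 Register messages are handled here: drop anything
	 * else, drop Null-Registers, and drop packets whose checksum is
	 * invalid both over the PIM header alone and over the whole packet.
	 */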
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001561 if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 (pim->flags&PIM_NULL_REGISTER) ||
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001563 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
Al Virod3bc23e2006-11-14 21:24:49 -08001564 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 goto drop;
1566
1567 /* check if the inner packet is destined to mcast group */
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001568 encap = (struct iphdr *)(skb_transport_header(skb) +
1569 sizeof(struct pimreghdr));
Joe Perchesf97c1e02007-12-16 13:45:43 -08001570 if (!ipv4_is_multicast(encap->daddr) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 encap->tot_len == 0 ||
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001572 ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 goto drop;
1574
1575 read_lock(&mrt_lock);
1576 if (reg_vif_num >= 0)
1577 reg_dev = vif_table[reg_vif_num].dev;
1578 if (reg_dev)
1579 dev_hold(reg_dev);
1580 read_unlock(&mrt_lock);
1581
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001582 if (reg_dev == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 goto drop;
1584
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001585 skb->mac_header = skb->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 skb_pull(skb, (u8*)encap - skb->data);
Arnaldo Carvalho de Melo31c77112007-03-10 19:04:55 -03001587 skb_reset_network_header(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 skb->dev = reg_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 skb->protocol = htons(ETH_P_IP);
1590	skb->ip_summed = CHECKSUM_NONE;
1591 skb->pkt_type = PACKET_HOST;
1592 dst_release(skb->dst);
Pavel Emelyanovcf3677a2008-05-21 14:17:33 -07001593 reg_dev->stats.rx_bytes += skb->len;
1594 reg_dev->stats.rx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 skb->dst = NULL;
1596 nf_reset(skb);
1597 netif_rx(skb);
1598 dev_put(reg_dev);
1599 return 0;
1600 drop:
1601 kfree_skb(skb);
1602 return 0;
1603}
1604#endif
1605
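/*
 *	Fill an rtnetlink route reply for a resolved cache entry: RTA_IIF
 *	carries the incoming interface and RTA_MULTIPATH gets one nexthop
 *	per outgoing vif whose TTL threshold is below 255.
 */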
1606static int
1607ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
1608{
1609 int ct;
1610 struct rtnexthop *nhp;
1611 struct net_device *dev = vif_table[c->mfc_parent].dev;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001612 u8 *b = skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 struct rtattr *mp_head;
1614
1615 if (dev)
1616 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
1617
Jianjun Kongc354e122008-11-03 00:28:02 -08001618 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619
1620 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1621 if (c->mfc_un.res.ttls[ct] < 255) {
1622 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1623 goto rtattr_failure;
Jianjun Kongc354e122008-11-03 00:28:02 -08001624 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 nhp->rtnh_flags = 0;
1626 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
1627 nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
1628 nhp->rtnh_len = sizeof(*nhp);
1629 }
1630 }
1631 mp_head->rta_type = RTA_MULTIPATH;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001632 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 rtm->rtm_type = RTN_MULTICAST;
1634 return 1;
1635
1636rtattr_failure:
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -07001637 nlmsg_trim(skb, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 return -EMSGSIZE;
1639}
1640
1641int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1642{
1643 int err;
1644 struct mfc_cache *cache;
Eric Dumazetee6b9672008-03-05 18:30:47 -08001645 struct rtable *rt = skb->rtable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646
1647 read_lock(&mrt_lock);
1648 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
1649
Jianjun Kongc354e122008-11-03 00:28:02 -08001650 if (cache == NULL) {
Alexey Kuznetsov72287492006-07-25 16:45:12 -07001651 struct sk_buff *skb2;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001652 struct iphdr *iph;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 struct net_device *dev;
1654 int vif;
1655
1656 if (nowait) {
1657 read_unlock(&mrt_lock);
1658 return -EAGAIN;
1659 }
1660
1661 dev = skb->dev;
1662 if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
1663 read_unlock(&mrt_lock);
1664 return -ENODEV;
1665 }
Alexey Kuznetsov72287492006-07-25 16:45:12 -07001666 skb2 = skb_clone(skb, GFP_ATOMIC);
1667 if (!skb2) {
1668 read_unlock(&mrt_lock);
1669 return -ENOMEM;
1670 }
1671
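		/*
		 * Build a minimal IP header on the clone, carrying just the
		 * route's source and group addresses (version is left as 0),
		 * so the packet can be queued as an unresolved cache entry.
		 */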
Arnaldo Carvalho de Meloe2d1bca2007-04-10 20:46:21 -07001672 skb_push(skb2, sizeof(struct iphdr));
1673 skb_reset_network_header(skb2);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001674 iph = ip_hdr(skb2);
1675 iph->ihl = sizeof(struct iphdr) >> 2;
1676 iph->saddr = rt->rt_src;
1677 iph->daddr = rt->rt_dst;
1678 iph->version = 0;
Alexey Kuznetsov72287492006-07-25 16:45:12 -07001679 err = ipmr_cache_unresolved(vif, skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 read_unlock(&mrt_lock);
1681 return err;
1682 }
1683
1684 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
1685 cache->mfc_flags |= MFC_NOTIFY;
1686 err = ipmr_fill_mroute(skb, cache, rtm);
1687 read_unlock(&mrt_lock);
1688 return err;
1689}
1690
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001691#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692/*
1693 * The /proc interfaces to multicast routing: /proc/net/ip_mr_cache and /proc/net/ip_mr_vif
1694 */
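/*
 *	Example (the header lines correspond to the seq_show handlers
 *	below; one line per vif / cache entry follows them):
 *
 *	# cat /proc/net/ip_mr_vif
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *
 *	# cat /proc/net/ip_mr_cache
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 */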
1695struct ipmr_vif_iter {
1696 int ct;
1697};
1698
1699static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
1700 loff_t pos)
1701{
1702 for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
Stephen Hemminger132adf52007-03-08 20:44:43 -08001703 if (!VIF_EXISTS(iter->ct))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 continue;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001705 if (pos-- == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 return &vif_table[iter->ct];
1707 }
1708 return NULL;
1709}
1710
1711static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemmingerba93ef72008-01-21 17:28:59 -08001712 __acquires(mrt_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713{
1714 read_lock(&mrt_lock);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001715 return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 : SEQ_START_TOKEN;
1717}
1718
1719static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1720{
1721 struct ipmr_vif_iter *iter = seq->private;
1722
1723 ++*pos;
1724 if (v == SEQ_START_TOKEN)
1725 return ipmr_vif_seq_idx(iter, 0);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001726
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 while (++iter->ct < maxvif) {
Stephen Hemminger132adf52007-03-08 20:44:43 -08001728 if (!VIF_EXISTS(iter->ct))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 continue;
1730 return &vif_table[iter->ct];
1731 }
1732 return NULL;
1733}
1734
1735static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
Stephen Hemmingerba93ef72008-01-21 17:28:59 -08001736 __releases(mrt_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737{
1738 read_unlock(&mrt_lock);
1739}
1740
1741static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
1742{
1743 if (v == SEQ_START_TOKEN) {
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001744 seq_puts(seq,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
1746 } else {
1747 const struct vif_device *vif = v;
1748 const char *name = vif->dev ? vif->dev->name : "none";
1749
1750 seq_printf(seq,
1751 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
1752 vif - vif_table,
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001753 name, vif->bytes_in, vif->pkt_in,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 vif->bytes_out, vif->pkt_out,
1755 vif->flags, vif->local, vif->remote);
1756 }
1757 return 0;
1758}
1759
Stephen Hemmingerf6908082007-03-12 14:34:29 -07001760static const struct seq_operations ipmr_vif_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 .start = ipmr_vif_seq_start,
1762 .next = ipmr_vif_seq_next,
1763 .stop = ipmr_vif_seq_stop,
1764 .show = ipmr_vif_seq_show,
1765};
1766
1767static int ipmr_vif_open(struct inode *inode, struct file *file)
1768{
Pavel Emelyanovcf7732e2007-10-10 02:29:29 -07001769 return seq_open_private(file, &ipmr_vif_seq_ops,
1770 sizeof(struct ipmr_vif_iter));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771}
1772
Arjan van de Ven9a321442007-02-12 00:55:35 -08001773static const struct file_operations ipmr_vif_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 .owner = THIS_MODULE,
1775 .open = ipmr_vif_open,
1776 .read = seq_read,
1777 .llseek = seq_lseek,
1778 .release = seq_release_private,
1779};
1780
1781struct ipmr_mfc_iter {
1782 struct mfc_cache **cache;
1783 int ct;
1784};
1785
1786
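/*
 *	The mfc iterator walks the resolved entries in mfc_cache_array under
 *	read_lock(&mrt_lock), then the unresolved queue under mfc_unres_lock;
 *	it->cache records which table (and therefore which lock) is current
 *	so that ipmr_mfc_seq_stop() releases the right one.
 */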
1787static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
1788{
1789 struct mfc_cache *mfc;
1790
1791 it->cache = mfc_cache_array;
1792 read_lock(&mrt_lock);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001793 for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
Stephen Hemminger132adf52007-03-08 20:44:43 -08001794 for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001795 if (pos-- == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 return mfc;
1797 read_unlock(&mrt_lock);
1798
1799 it->cache = &mfc_unres_queue;
1800 spin_lock_bh(&mfc_unres_lock);
Stephen Hemminger132adf52007-03-08 20:44:43 -08001801 for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 if (pos-- == 0)
1803 return mfc;
1804 spin_unlock_bh(&mfc_unres_lock);
1805
1806 it->cache = NULL;
1807 return NULL;
1808}
1809
1810
1811static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
1812{
1813 struct ipmr_mfc_iter *it = seq->private;
1814 it->cache = NULL;
1815 it->ct = 0;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001816 return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 : SEQ_START_TOKEN;
1818}
1819
1820static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1821{
1822 struct mfc_cache *mfc = v;
1823 struct ipmr_mfc_iter *it = seq->private;
1824
1825 ++*pos;
1826
1827 if (v == SEQ_START_TOKEN)
1828 return ipmr_mfc_seq_idx(seq->private, 0);
1829
1830 if (mfc->next)
1831 return mfc->next;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001832
1833 if (it->cache == &mfc_unres_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 goto end_of_list;
1835
1836 BUG_ON(it->cache != mfc_cache_array);
1837
1838 while (++it->ct < MFC_LINES) {
1839 mfc = mfc_cache_array[it->ct];
1840 if (mfc)
1841 return mfc;
1842 }
1843
1844 /* exhausted cache_array, show unresolved */
1845 read_unlock(&mrt_lock);
1846 it->cache = &mfc_unres_queue;
1847 it->ct = 0;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001848
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 spin_lock_bh(&mfc_unres_lock);
1850 mfc = mfc_unres_queue;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001851 if (mfc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 return mfc;
1853
1854 end_of_list:
1855 spin_unlock_bh(&mfc_unres_lock);
1856 it->cache = NULL;
1857
1858 return NULL;
1859}
1860
1861static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
1862{
1863 struct ipmr_mfc_iter *it = seq->private;
1864
1865 if (it->cache == &mfc_unres_queue)
1866 spin_unlock_bh(&mfc_unres_lock);
1867 else if (it->cache == mfc_cache_array)
1868 read_unlock(&mrt_lock);
1869}
1870
1871static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1872{
1873 int n;
1874
1875 if (v == SEQ_START_TOKEN) {
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001876 seq_puts(seq,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
1878 } else {
1879 const struct mfc_cache *mfc = v;
1880 const struct ipmr_mfc_iter *it = seq->private;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001881
Benjamin Thery1ea472e2008-12-03 22:21:47 -08001882 seq_printf(seq, "%08lX %08lX %-3d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 (unsigned long) mfc->mfc_mcastgrp,
1884 (unsigned long) mfc->mfc_origin,
Benjamin Thery1ea472e2008-12-03 22:21:47 -08001885 mfc->mfc_parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886
1887 if (it->cache != &mfc_unres_queue) {
Benjamin Thery1ea472e2008-12-03 22:21:47 -08001888 seq_printf(seq, " %8lu %8lu %8lu",
1889 mfc->mfc_un.res.pkt,
1890 mfc->mfc_un.res.bytes,
1891 mfc->mfc_un.res.wrong_if);
Stephen Hemminger132adf52007-03-08 20:44:43 -08001892 for (n = mfc->mfc_un.res.minvif;
1893 n < mfc->mfc_un.res.maxvif; n++ ) {
1894 if (VIF_EXISTS(n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 && mfc->mfc_un.res.ttls[n] < 255)
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001896 seq_printf(seq,
1897 " %2d:%-3d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 n, mfc->mfc_un.res.ttls[n]);
1899 }
Benjamin Thery1ea472e2008-12-03 22:21:47 -08001900 } else {
1901 /* unresolved mfc_caches don't contain
1902 * pkt, bytes and wrong_if values
1903 */
1904 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 }
1906 seq_putc(seq, '\n');
1907 }
1908 return 0;
1909}
1910
Stephen Hemmingerf6908082007-03-12 14:34:29 -07001911static const struct seq_operations ipmr_mfc_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 .start = ipmr_mfc_seq_start,
1913 .next = ipmr_mfc_seq_next,
1914 .stop = ipmr_mfc_seq_stop,
1915 .show = ipmr_mfc_seq_show,
1916};
1917
1918static int ipmr_mfc_open(struct inode *inode, struct file *file)
1919{
Pavel Emelyanovcf7732e2007-10-10 02:29:29 -07001920 return seq_open_private(file, &ipmr_mfc_seq_ops,
1921 sizeof(struct ipmr_mfc_iter));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922}
1923
Arjan van de Ven9a321442007-02-12 00:55:35 -08001924static const struct file_operations ipmr_mfc_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 .owner = THIS_MODULE,
1926 .open = ipmr_mfc_open,
1927 .read = seq_read,
1928 .llseek = seq_lseek,
1929 .release = seq_release_private,
1930};
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001931#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932
1933#ifdef CONFIG_IP_PIMSM_V2
1934static struct net_protocol pim_protocol = {
1935 .handler = pim_rcv,
1936};
1937#endif
1938
1939
1940/*
1941 * Setup for IP multicast routing
1942 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001943
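/*
 *	Create the mfc slab cache, set up the expiry timer, register the
 *	netdevice notifier and, with CONFIG_PROC_FS, the two /proc/net
 *	entries; on failure, everything done so far is unwound in reverse.
 */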
Wang Chen03d2f892008-07-03 12:13:36 +08001944int __init ip_mr_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945{
Wang Chen03d2f892008-07-03 12:13:36 +08001946 int err;
1947
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 mrt_cachep = kmem_cache_create("ip_mrt_cache",
1949 sizeof(struct mfc_cache),
Alexey Dobriyane5d679f2006-08-26 19:25:52 -07001950 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09001951 NULL);
Wang Chen03d2f892008-07-03 12:13:36 +08001952 if (!mrt_cachep)
1953 return -ENOMEM;
1954
Pavel Emelyanovb24b8a22008-01-23 21:20:07 -08001955 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
Wang Chen03d2f892008-07-03 12:13:36 +08001956 err = register_netdevice_notifier(&ip_mr_notifier);
1957 if (err)
1958 goto reg_notif_fail;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001959#ifdef CONFIG_PROC_FS
Wang Chen03d2f892008-07-03 12:13:36 +08001960 err = -ENOMEM;
1961 if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops))
1962 goto proc_vif_fail;
1963 if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops))
1964 goto proc_cache_fail;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001965#endif
Wang Chen03d2f892008-07-03 12:13:36 +08001966 return 0;
Wang Chen03d2f892008-07-03 12:13:36 +08001967#ifdef CONFIG_PROC_FS
Wang Chen03d2f892008-07-03 12:13:36 +08001968proc_cache_fail:
1969 proc_net_remove(&init_net, "ip_mr_vif");
Benjamin Theryc3e38892008-11-19 14:07:41 -08001970proc_vif_fail:
1971 unregister_netdevice_notifier(&ip_mr_notifier);
Wang Chen03d2f892008-07-03 12:13:36 +08001972#endif
Benjamin Theryc3e38892008-11-19 14:07:41 -08001973reg_notif_fail:
1974 del_timer(&ipmr_expire_timer);
1975 kmem_cache_destroy(mrt_cachep);
Wang Chen03d2f892008-07-03 12:13:36 +08001976 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977}