/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

static struct vif_device vif_table[MAXVIFS];		/* Devices		*/
static int maxvif;

#define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)

static int mroute_do_assert;				/* Set in PIM assert	*/
static int mroute_do_pim;

static struct mfc_cache *mfc_cache_array[MFC_LINES];	/* Forwarding cache	*/

static struct mfc_cache *mfc_unres_queue;		/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;		/* Size of unresolved	*/

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */
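
/* For illustration, the read side described above is the pattern used by
 * ip_mr_input() and ipmr_ioctl() further down:
 *
 *	read_lock(&mrt_lock);
 *	cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
 *	... use cache and vif_table[] under the read lock ...
 *	read_unlock(&mrt_lock);
 *
 * while writers such as vif_add() and ipmr_mfc_add() take
 * write_lock_bh(&mrt_lock) only around the pointer updates, and the
 * unresolved queue is always walked under spin_lock_bh(&mfc_unres_lock).
 */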

static struct kmem_cache *mrt_cachep __read_mostly;

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol;
#endif

static struct timer_list ipmr_expire_timer;

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	dev_close(dev);

	dev = __dev_get_by_name(&init_net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

static
struct net_device *ipmr_new_tunnel(struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(&init_net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else
			err = -EOPNOTSUPP;

		dev = NULL;

		if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL)
				goto failure;

			ipv4_devconf_setall(in_dev);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}

#ifdef CONFIG_IP_PIMSM

static int reg_vif_num = -1;

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
}

static struct net_device *ipmr_reg_vif(void)
{
	struct net_device *dev;
	struct in_device *in_dev;

	dev = alloc_netdev(0, "pimreg", reg_vif_setup);

	if (dev == NULL)
		return NULL;

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	rcu_read_lock();
	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */

static int vif_delete(int vifi, int notify)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == reg_vif_num)
		reg_vif_num = -1;
#endif

	if (vifi+1 == maxvif) {
		int tmp;
		for (tmp=vifi-1; tmp>=0; tmp--) {
			if (VIF_EXISTS(tmp))
				break;
		}
		maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = NLMSG_DATA(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	kmem_cache_free(mrt_cachep, c);
}


/* Single timer process for all the unresolved queue. */

static void ipmr_expire_process(unsigned long dummy)
{
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, **cp;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (atomic_read(&cache_resolve_queue_len) == 0)
		goto out;

	now = jiffies;
	expires = 10*HZ;
	cp = &mfc_unres_queue;

	while ((c=*cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		*cp = c->next;

		ipmr_destroy_unres(c);
	}

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi=0; vifi<maxvif; vifi++) {
		if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}

static int vif_add(struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif();
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case 0:
		dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
		return -EADDRNOTAVAIL;
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	ip_rt_multicast_event(in_dev);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags&VIFF_REGISTER)
		reg_vif_num = vifi;
#endif
	if (vifi+1 > maxvif)
		maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	for (c=mfc_cache_array[line]; c; c = c->next) {
		if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
			break;
	}
	return c;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10*HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = (skb_tail_pointer(skb) -
						  (u8 *)nlh);
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = NLMSG_DATA(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			ip_mr_forward(skb, c, 0);
	}
}

/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */
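
/* Concretely: the skb handed to mrouted reuses its IP header area as a
 * struct igmpmsg (im_msgtype, im_mbz = 0, im_vif = reporting vif).  For
 * IGMPMSG_WHOLEPKT the original packet follows in full; otherwise only the
 * original IP header is copied, its protocol field is zeroed as a marker,
 * and a dummy IGMP header carrying the message type is appended.
 */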

static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix ihl, length etc.
		   And all this only to mangle msg->im_msgtype and
		   to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{

	/*
	 *	Copy the IP header
	 */

	skb->network_header = skb->tail;
	skb_put(skb, ihl);
	skb_copy_to_linear_data(skb, pkt->data, ihl);
	ip_hdr(skb)->protocol = 0;			/* Flag to the kernel this is a route add */
	msg = (struct igmpmsg *)skb_network_header(skb);
	msg->im_vif = vifi;
	skb->dst = dst_clone(pkt->dst);

	/*
	 *	Add our header
	 */

	igmp=(struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	igmp->type	=
	msg->im_msgtype = assert;
	igmp->code	=	0;
	ip_hdr(skb)->tot_len = htons(skb->len);		/* Fix the length */
	skb->transport_header = skb->network_header;
	}

	if (init_net.ipv4.mroute_sk == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to mrouted
	 */
	ret = sock_queue_rcv_skb(init_net.ipv4.mroute_sk, skb);
	if (ret < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
{
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	for (c=mfc_unres_queue; c; c=c->next) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr)
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
		    (c=ipmr_cache_alloc_unres())==NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/*
		 *	Reflect first query at mrouted.
		 */
		if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen>3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mfcctl *mfc)
{
	int line;
	struct mfc_cache *c, **cp;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
{
	int line;
	struct mfc_cache *uc, *c, **cp;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc_cache_array[line];
	mfc_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
	     cp = &uc->next) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			*cp = uc->next;
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ipmr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i=0; i<maxvif; i++) {
		if (!(vif_table[i].flags&VIFF_STATIC))
			vif_delete(i, 0);
	}

	/*
	 *	Wipe the cache
	 */
	for (i=0; i<MFC_LINES; i++) {
		struct mfc_cache *c, **cp;

		cp = &mfc_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags&MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static void mrtsock_destruct(struct sock *sk)
{
	rtnl_lock();
	if (sk == init_net.ipv4.mroute_sk) {
		IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;

		write_lock_bh(&mrt_lock);
		init_net.ipv4.mroute_sk = NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	}
	rtnl_unlock();
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

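/* A rough sketch of how a user space routing daemon drives this interface
 * (hypothetical example, not part of this file):
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *	struct mfcctl mc;
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	vc.vifc_lcl_addr.s_addr = local_if_addr;	(hypothetical address)
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *	... fill mc with (origin, group, parent vif, oif ttls) ...
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *	...
 *	setsockopt(fd, IPPROTO_IP, MRT_DONE, &one, sizeof(one));
 *
 * MRT_INIT is only accepted on the raw IGMP socket that becomes mroute_sk;
 * the other options are rejected with -EACCES unless issued on that socket
 * or by a CAP_NET_ADMIN caller, as enforced below.
 */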
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
{
	int ret;
	struct vifctl vif;
	struct mfcctl mfc;

	if (optname != MRT_INIT) {
		if (sk != init_net.ipv4.mroute_sk && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_IGMP)
			return -EOPNOTSUPP;
		if (optlen != sizeof(int))
			return -ENOPROTOOPT;

		rtnl_lock();
		if (init_net.ipv4.mroute_sk) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			write_lock_bh(&mrt_lock);
			init_net.ipv4.mroute_sk = sk;
			write_unlock_bh(&mrt_lock);

			IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk != init_net.ipv4.mroute_sk)
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(&vif, sk == init_net.ipv4.mroute_sk);
		} else {
			ret = vif_delete(vif.vifc_vifi, 0);
		}
		rtnl_unlock();
		return ret;

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		if (optlen != sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT_DEL_MFC)
			ret = ipmr_mfc_delete(&mfc);
		else
			ret = ipmr_mfc_add(&mfc, sk == init_net.ipv4.mroute_sk);
		rtnl_unlock();
		return ret;
		/*
		 *	Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;
		if (get_user(v,(int __user *)optval))
			return -EFAULT;
		mroute_do_assert=(v)?1:0;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v;

		if (get_user(v,(int __user *)optval))
			return -EFAULT;
		v = (v) ? 1 : 0;

		rtnl_lock();
		ret = 0;
		if (v != mroute_do_pim) {
			mroute_do_pim = v;
			mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
			if (mroute_do_pim)
				ret = inet_add_protocol(&pim_protocol,
							IPPROTO_PIM);
			else
				ret = inet_del_protocol(&pim_protocol,
							IPPROTO_PIM);
			if (ret < 0)
				ret = -EAGAIN;
#endif
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;

	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	   optname!=MRT_PIM &&
#endif
	   optname!=MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (optname == MRT_VERSION)
		val = 0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mroute_do_pim;
#endif
	else
		val = mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */
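
/* For example, a monitoring tool could read the per-vif counters kept in
 * vif_table[] with a user space sketch along these lines:
 *
 *	struct sioc_vif_req vr = { .vifi = 0 };
 *
 *	if (ioctl(fd, SIOCGETVIFCNT, &vr) == 0)
 *		printf("vif0: %lu pkts in, %lu pkts out\n", vr.icount, vr.ocount);
 *
 * SIOCGETSGCNT works the same way for an (origin, group) pair via
 * struct sioc_sg_req.
 */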

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif=&vif_table[vr.vifi];
		if (VIF_EXISTS(vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}


static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct vif_device *v;
	int ct;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;
	v=&vif_table[0];
	for (ct=0; ct<maxvif; ct++,v++) {
		if (v->dev == dev)
			vif_delete(ct, 1);
	}
	return NOTIFY_DONE;
}


static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */
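
/* The result is a minimal 20-byte outer header in front of the original
 * datagram, roughly:
 *
 *	+----------------------+------------------------------+
 *	| outer iphdr          | original multicast packet    |
 *	| proto = IPPROTO_IPIP | (inner iphdr + payload)      |
 *	+----------------------+------------------------------+
 *
 * with tos/ttl copied from the inner header and saddr/daddr taken from the
 * vif's local and remote tunnel endpoints.
 */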

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->tos	=	old_iph->tos;
	iph->ttl	=	old_iph->ttl;
	iph->frag_off	=	0;
	iph->daddr	=	daddr;
	iph->saddr	=	saddr;
	iph->protocol	=	IPPROTO_IPIP;
	iph->ihl	=	5;
	iph->tot_len	=	htons(skb->len);
	ip_select_ident(iph, skb->dst, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options * opt	= &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}

/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	int    encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
		kfree_skb(skb);
		return;
	}
#endif

	if (vif->flags&VIFF_TUNNEL) {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = vif->remote,
						.saddr = vif->local,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&init_net, &rt, &fl))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&init_net, &rt, &fl))
			goto out_free;
	}

	dev = rt->u.dst.dev;

	if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		   allow to send ICMP, so that packets will disappear
		   to blackhole.
		 */

		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	dst_release(skb->dst);
	skb->dst = &rt->u.dst;
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
	return;
}

static int ipmr_find_vif(struct net_device *dev)
{
	int ct;
	for (ct=maxvif-1; ct>=0; ct--) {
		if (vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (vif_table[vif].dev != skb->dev) {
		int true_vifi;

		if (skb->rtable->fl.iif == 0) {
			/* It is our own packet, looped back.
			   Very complicated situation...

			   The best workaround until routing daemons will be
			   fixed is not to redistribute packet, if it was
			   send through wrong interface. It means, that
			   multicast applications WILL NOT work for
			   (S,G), which have default multicast route pointing
			   to wrong oif. In any case, it is not a good
			   idea to use multicasting applications on router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(skb->dev);

		if (true_vifi >= 0 && mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

	vif_table[vif].pkt_in++;
	vif_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ipmr_queue_xmit(skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2)
				ipmr_queue_xmit(skb2, cache, psend);
		} else {
			ipmr_queue_xmit(skb, cache, psend);
			return 0;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
	return 0;
}


/*
 *	Multicast packets for forwarding arrive here
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	int local = skb->rtable->rt_flags&RTCF_LOCAL;

	/* Packet is looped back after forward, it should not be
	   forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags&IPSKB_FORWARDED)
		goto dont_forward;

	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){
			/* IGMPv1 (and broken IGMPv2 implementations sort of
			   Cisco IOS <= 11.2(8)) do not put router alert
			   option to IGMP packets destined to routable
			   groups. It is very bad, because it means
			   that we can forward NO IGMP messages.
			 */
			read_lock(&mrt_lock);
			if (init_net.ipv4.mroute_sk) {
				nf_reset(skb);
				raw_rcv(init_net.ipv4.mroute_sk, skb);
				read_unlock(&mrt_lock);
				return 0;
			}
			read_unlock(&mrt_lock);
		}
	}

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL) {
				read_unlock(&mrt_lock);
				return -ENOBUFS;
			}
			skb = skb2;
		}

		vif = ipmr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ipmr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip_mr_forward(skb, cache, local);

	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

#ifdef CONFIG_IP_PIMSM
static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/*
	   Check that:
	   a. packet is really destined to a multicast group
1493 b. packet is not a NULL-REGISTER
1494 c. packet is not truncated
1495 */
Joe Perchesf97c1e02007-12-16 13:45:43 -08001496 if (!ipv4_is_multicast(encap->daddr) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 encap->tot_len == 0 ||
Ilpo Järvinenb1879202008-12-16 01:15:11 -08001498 ntohs(encap->tot_len) + pimlen > skb->len)
1499 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500
1501 read_lock(&mrt_lock);
1502 if (reg_vif_num >= 0)
1503 reg_dev = vif_table[reg_vif_num].dev;
1504 if (reg_dev)
1505 dev_hold(reg_dev);
1506 read_unlock(&mrt_lock);
1507
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001508 if (reg_dev == NULL)
Ilpo Järvinenb1879202008-12-16 01:15:11 -08001509 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001511 skb->mac_header = skb->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 skb_pull(skb, (u8*)encap - skb->data);
Arnaldo Carvalho de Melo31c77112007-03-10 19:04:55 -03001513 skb_reset_network_header(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 skb->dev = reg_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 skb->protocol = htons(ETH_P_IP);
1516 skb->ip_summed = 0;
1517 skb->pkt_type = PACKET_HOST;
1518 dst_release(skb->dst);
1519 skb->dst = NULL;
Pavel Emelyanovcf3677a2008-05-21 14:17:33 -07001520 reg_dev->stats.rx_bytes += skb->len;
1521 reg_dev->stats.rx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 nf_reset(skb);
1523 netif_rx(skb);
1524 dev_put(reg_dev);
Ilpo Järvinenb1879202008-12-16 01:15:11 -08001525
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 return 0;
Ilpo Järvinenb1879202008-12-16 01:15:11 -08001527}
1528#endif
1529
1530#ifdef CONFIG_IP_PIMSM_V1
1531/*
1532 * Handle IGMP messages of PIMv1
1533 */
1534
 1535int pim_rcv_v1(struct sk_buff *skb)
1536{
1537 struct igmphdr *pim;
1538
1539 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
1540 goto drop;
1541
1542 pim = igmp_hdr(skb);
1543
1544 if (!mroute_do_pim ||
1545 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
1546 goto drop;
1547
1548 if (__pim_rcv(skb, sizeof(*pim))) {
1549drop:
1550 kfree_skb(skb);
1551 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 return 0;
1553}
1554#endif
1555
1556#ifdef CONFIG_IP_PIMSM_V2
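/*
 * Handle PIMv2 REGISTER messages: check type and flags, verify the
 * checksum over the PIM header (or, for older peers, over the whole
 * packet), then decapsulate via __pim_rcv().
 */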
 1557static int pim_rcv(struct sk_buff *skb)
1558{
1559 struct pimreghdr *pim;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560
Ilpo Järvinenb1879202008-12-16 01:15:11 -08001561 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 goto drop;
1563
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001564 pim = (struct pimreghdr *)skb_transport_header(skb);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001565 if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 (pim->flags&PIM_NULL_REGISTER) ||
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001567 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
Al Virod3bc23e2006-11-14 21:24:49 -08001568 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 goto drop;
1570
Ilpo Järvinenb1879202008-12-16 01:15:11 -08001571 if (__pim_rcv(skb, sizeof(*pim))) {
1572drop:
1573 kfree_skb(skb);
1574 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 return 0;
1576}
1577#endif
1578
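/*
 * Fill an rtnetlink route dump for a resolved cache entry: report the
 * incoming interface and an RTA_MULTIPATH attribute holding one
 * rtnexthop per outgoing vif, with the TTL threshold in rtnh_hops.
 */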
1579static int
1580ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
1581{
1582 int ct;
1583 struct rtnexthop *nhp;
1584 struct net_device *dev = vif_table[c->mfc_parent].dev;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001585 u8 *b = skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 struct rtattr *mp_head;
1587
1588 if (dev)
1589 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
1590
Jianjun Kongc354e122008-11-03 00:28:02 -08001591 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592
1593 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1594 if (c->mfc_un.res.ttls[ct] < 255) {
1595 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1596 goto rtattr_failure;
Jianjun Kongc354e122008-11-03 00:28:02 -08001597 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 nhp->rtnh_flags = 0;
1599 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
1600 nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
1601 nhp->rtnh_len = sizeof(*nhp);
1602 }
1603 }
1604 mp_head->rta_type = RTA_MULTIPATH;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001605 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606 rtm->rtm_type = RTN_MULTICAST;
1607 return 1;
1608
1609rtattr_failure:
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -07001610 nlmsg_trim(skb, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 return -EMSGSIZE;
1612}
1613
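/*
 * Answer an RTM_GETROUTE request for a multicast route.  If no cache
 * entry exists yet, a dummy packet is queued as unresolved so the reply
 * can be completed once the daemon resolves the entry (unless the
 * caller asked not to wait).
 */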
1614int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1615{
1616 int err;
1617 struct mfc_cache *cache;
Eric Dumazetee6b9672008-03-05 18:30:47 -08001618 struct rtable *rt = skb->rtable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619
1620 read_lock(&mrt_lock);
1621 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
1622
Jianjun Kongc354e122008-11-03 00:28:02 -08001623 if (cache == NULL) {
Alexey Kuznetsov72287492006-07-25 16:45:12 -07001624 struct sk_buff *skb2;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001625 struct iphdr *iph;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 struct net_device *dev;
1627 int vif;
1628
1629 if (nowait) {
1630 read_unlock(&mrt_lock);
1631 return -EAGAIN;
1632 }
1633
1634 dev = skb->dev;
1635 if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
1636 read_unlock(&mrt_lock);
1637 return -ENODEV;
1638 }
Alexey Kuznetsov72287492006-07-25 16:45:12 -07001639 skb2 = skb_clone(skb, GFP_ATOMIC);
1640 if (!skb2) {
1641 read_unlock(&mrt_lock);
1642 return -ENOMEM;
1643 }
1644
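		/*
		 * Build a dummy IP header on the clone; version 0 marks it
		 * as a pending rtnetlink request (see ipmr_cache_resolve())
		 * rather than a real packet to forward.
		 */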
Arnaldo Carvalho de Meloe2d1bca2007-04-10 20:46:21 -07001645 skb_push(skb2, sizeof(struct iphdr));
1646 skb_reset_network_header(skb2);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001647 iph = ip_hdr(skb2);
1648 iph->ihl = sizeof(struct iphdr) >> 2;
1649 iph->saddr = rt->rt_src;
1650 iph->daddr = rt->rt_dst;
1651 iph->version = 0;
Alexey Kuznetsov72287492006-07-25 16:45:12 -07001652 err = ipmr_cache_unresolved(vif, skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 read_unlock(&mrt_lock);
1654 return err;
1655 }
1656
1657 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
1658 cache->mfc_flags |= MFC_NOTIFY;
1659 err = ipmr_fill_mroute(skb, cache, rtm);
1660 read_unlock(&mrt_lock);
1661 return err;
1662}
1663
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001664#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665/*
 1666 *	The /proc interfaces to multicast routing: /proc/net/ip_mr_cache and /proc/net/ip_mr_vif
1667 */
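/* Iterator for /proc/net/ip_mr_vif: walks vif_table under mrt_lock. */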
1668struct ipmr_vif_iter {
1669 int ct;
1670};
1671
1672static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
1673 loff_t pos)
1674{
1675 for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
Stephen Hemminger132adf52007-03-08 20:44:43 -08001676 if (!VIF_EXISTS(iter->ct))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 continue;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001678 if (pos-- == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 return &vif_table[iter->ct];
1680 }
1681 return NULL;
1682}
1683
1684static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemmingerba93ef72008-01-21 17:28:59 -08001685 __acquires(mrt_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686{
1687 read_lock(&mrt_lock);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001688 return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 : SEQ_START_TOKEN;
1690}
1691
1692static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1693{
1694 struct ipmr_vif_iter *iter = seq->private;
1695
1696 ++*pos;
1697 if (v == SEQ_START_TOKEN)
1698 return ipmr_vif_seq_idx(iter, 0);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001699
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 while (++iter->ct < maxvif) {
Stephen Hemminger132adf52007-03-08 20:44:43 -08001701 if (!VIF_EXISTS(iter->ct))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 continue;
1703 return &vif_table[iter->ct];
1704 }
1705 return NULL;
1706}
1707
1708static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
Stephen Hemmingerba93ef72008-01-21 17:28:59 -08001709 __releases(mrt_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710{
1711 read_unlock(&mrt_lock);
1712}
1713
1714static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
1715{
1716 if (v == SEQ_START_TOKEN) {
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001717 seq_puts(seq,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
1719 } else {
1720 const struct vif_device *vif = v;
1721 const char *name = vif->dev ? vif->dev->name : "none";
1722
1723 seq_printf(seq,
1724 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
1725 vif - vif_table,
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001726 name, vif->bytes_in, vif->pkt_in,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 vif->bytes_out, vif->pkt_out,
1728 vif->flags, vif->local, vif->remote);
1729 }
1730 return 0;
1731}
1732
Stephen Hemmingerf6908082007-03-12 14:34:29 -07001733static const struct seq_operations ipmr_vif_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 .start = ipmr_vif_seq_start,
1735 .next = ipmr_vif_seq_next,
1736 .stop = ipmr_vif_seq_stop,
1737 .show = ipmr_vif_seq_show,
1738};
1739
1740static int ipmr_vif_open(struct inode *inode, struct file *file)
1741{
Pavel Emelyanovcf7732e2007-10-10 02:29:29 -07001742 return seq_open_private(file, &ipmr_vif_seq_ops,
1743 sizeof(struct ipmr_vif_iter));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744}
1745
Arjan van de Ven9a321442007-02-12 00:55:35 -08001746static const struct file_operations ipmr_vif_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 .owner = THIS_MODULE,
1748 .open = ipmr_vif_open,
1749 .read = seq_read,
1750 .llseek = seq_lseek,
1751 .release = seq_release_private,
1752};
1753
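/*
 * Iterator for /proc/net/ip_mr_cache: walks the resolved entries in
 * mfc_cache_array under mrt_lock, then the unresolved queue under
 * mfc_unres_lock.  'cache' records which table we are in so that
 * seq_stop() releases the matching lock.
 */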
1754struct ipmr_mfc_iter {
1755 struct mfc_cache **cache;
1756 int ct;
1757};
1758
1759
1760static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
1761{
1762 struct mfc_cache *mfc;
1763
1764 it->cache = mfc_cache_array;
1765 read_lock(&mrt_lock);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001766 for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
Stephen Hemminger132adf52007-03-08 20:44:43 -08001767 for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001768 if (pos-- == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 return mfc;
1770 read_unlock(&mrt_lock);
1771
1772 it->cache = &mfc_unres_queue;
1773 spin_lock_bh(&mfc_unres_lock);
Stephen Hemminger132adf52007-03-08 20:44:43 -08001774 for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 if (pos-- == 0)
1776 return mfc;
1777 spin_unlock_bh(&mfc_unres_lock);
1778
1779 it->cache = NULL;
1780 return NULL;
1781}
1782
1783
1784static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
1785{
1786 struct ipmr_mfc_iter *it = seq->private;
1787 it->cache = NULL;
1788 it->ct = 0;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001789 return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 : SEQ_START_TOKEN;
1791}
1792
1793static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1794{
1795 struct mfc_cache *mfc = v;
1796 struct ipmr_mfc_iter *it = seq->private;
1797
1798 ++*pos;
1799
1800 if (v == SEQ_START_TOKEN)
1801 return ipmr_mfc_seq_idx(seq->private, 0);
1802
1803 if (mfc->next)
1804 return mfc->next;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001805
1806 if (it->cache == &mfc_unres_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 goto end_of_list;
1808
1809 BUG_ON(it->cache != mfc_cache_array);
1810
1811 while (++it->ct < MFC_LINES) {
1812 mfc = mfc_cache_array[it->ct];
1813 if (mfc)
1814 return mfc;
1815 }
1816
1817 /* exhausted cache_array, show unresolved */
1818 read_unlock(&mrt_lock);
1819 it->cache = &mfc_unres_queue;
1820 it->ct = 0;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001821
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 spin_lock_bh(&mfc_unres_lock);
1823 mfc = mfc_unres_queue;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001824 if (mfc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 return mfc;
1826
1827 end_of_list:
1828 spin_unlock_bh(&mfc_unres_lock);
1829 it->cache = NULL;
1830
1831 return NULL;
1832}
1833
1834static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
1835{
1836 struct ipmr_mfc_iter *it = seq->private;
1837
1838 if (it->cache == &mfc_unres_queue)
1839 spin_unlock_bh(&mfc_unres_lock);
1840 else if (it->cache == mfc_cache_array)
1841 read_unlock(&mrt_lock);
1842}
1843
1844static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1845{
1846 int n;
1847
1848 if (v == SEQ_START_TOKEN) {
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001849 seq_puts(seq,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
1851 } else {
1852 const struct mfc_cache *mfc = v;
1853 const struct ipmr_mfc_iter *it = seq->private;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001854
Benjamin Thery999890b2008-12-03 22:22:16 -08001855 seq_printf(seq, "%08lX %08lX %-3hd",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 (unsigned long) mfc->mfc_mcastgrp,
1857 (unsigned long) mfc->mfc_origin,
Benjamin Thery1ea472e2008-12-03 22:21:47 -08001858 mfc->mfc_parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859
1860 if (it->cache != &mfc_unres_queue) {
Benjamin Thery1ea472e2008-12-03 22:21:47 -08001861 seq_printf(seq, " %8lu %8lu %8lu",
1862 mfc->mfc_un.res.pkt,
1863 mfc->mfc_un.res.bytes,
1864 mfc->mfc_un.res.wrong_if);
Stephen Hemminger132adf52007-03-08 20:44:43 -08001865 for (n = mfc->mfc_un.res.minvif;
 1866	     n < mfc->mfc_un.res.maxvif; n++) {
1867 if (VIF_EXISTS(n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 && mfc->mfc_un.res.ttls[n] < 255)
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001869 seq_printf(seq,
1870 " %2d:%-3d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 n, mfc->mfc_un.res.ttls[n]);
1872 }
Benjamin Thery1ea472e2008-12-03 22:21:47 -08001873 } else {
1874 /* unresolved mfc_caches don't contain
1875 * pkt, bytes and wrong_if values
1876 */
1877 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 }
1879 seq_putc(seq, '\n');
1880 }
1881 return 0;
1882}
1883
Stephen Hemmingerf6908082007-03-12 14:34:29 -07001884static const struct seq_operations ipmr_mfc_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885 .start = ipmr_mfc_seq_start,
1886 .next = ipmr_mfc_seq_next,
1887 .stop = ipmr_mfc_seq_stop,
1888 .show = ipmr_mfc_seq_show,
1889};
1890
1891static int ipmr_mfc_open(struct inode *inode, struct file *file)
1892{
Pavel Emelyanovcf7732e2007-10-10 02:29:29 -07001893 return seq_open_private(file, &ipmr_mfc_seq_ops,
1894 sizeof(struct ipmr_mfc_iter));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895}
1896
Arjan van de Ven9a321442007-02-12 00:55:35 -08001897static const struct file_operations ipmr_mfc_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 .owner = THIS_MODULE,
1899 .open = ipmr_mfc_open,
1900 .read = seq_read,
1901 .llseek = seq_lseek,
1902 .release = seq_release_private,
1903};
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001904#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905
1906#ifdef CONFIG_IP_PIMSM_V2
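/*
 * PIMv2 REGISTER messages arrive as IPPROTO_PIM packets; this handler
 * is registered/unregistered from the MRT_PIM setsockopt path when the
 * daemon toggles PIM-SM mode.
 */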
1907static struct net_protocol pim_protocol = {
1908 .handler = pim_rcv,
1909};
1910#endif
1911
1912
1913/*
 1914 *	Setup for IP multicast routing: cache slab, expiry timer, netdevice notifier and /proc entries
1915 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001916
Wang Chen03d2f892008-07-03 12:13:36 +08001917int __init ip_mr_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918{
Wang Chen03d2f892008-07-03 12:13:36 +08001919 int err;
1920
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 mrt_cachep = kmem_cache_create("ip_mrt_cache",
1922 sizeof(struct mfc_cache),
Alexey Dobriyane5d679f332006-08-26 19:25:52 -07001923 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09001924 NULL);
Wang Chen03d2f892008-07-03 12:13:36 +08001925 if (!mrt_cachep)
1926 return -ENOMEM;
1927
Pavel Emelyanovb24b8a22008-01-23 21:20:07 -08001928 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
Wang Chen03d2f892008-07-03 12:13:36 +08001929 err = register_netdevice_notifier(&ip_mr_notifier);
1930 if (err)
1931 goto reg_notif_fail;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001932#ifdef CONFIG_PROC_FS
Wang Chen03d2f892008-07-03 12:13:36 +08001933 err = -ENOMEM;
1934 if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops))
1935 goto proc_vif_fail;
1936 if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops))
1937 goto proc_cache_fail;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001938#endif
Wang Chen03d2f892008-07-03 12:13:36 +08001939 return 0;
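
	/* Error unwind: release resources in reverse order of setup. */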
Wang Chen03d2f892008-07-03 12:13:36 +08001940#ifdef CONFIG_PROC_FS
Wang Chen03d2f892008-07-03 12:13:36 +08001941proc_cache_fail:
1942 proc_net_remove(&init_net, "ip_mr_vif");
Benjamin Theryc3e38892008-11-19 14:07:41 -08001943proc_vif_fail:
1944 unregister_netdevice_notifier(&ip_mr_notifier);
Wang Chen03d2f892008-07-03 12:13:36 +08001945#endif
Benjamin Theryc3e38892008-11-19 14:07:41 -08001946reg_notif_fail:
1947 del_timer(&ipmr_expire_timer);
1948 kmem_cache_destroy(mrt_cachep);
Wang Chen03d2f892008-07-03 12:13:36 +08001949 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950}