blob: 50d0b301380e7d1c94fd9ff1e68c133cbec77b7d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * IP multicast routing support for mrouted 3.6/3.8
3 *
4 * (c) 1995 Alan Cox, <alan@redhat.com>
5 * Linux Consultancy and Custom Driver Development
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Version: $Id: ipmr.c,v 1.65 2001/10/31 21:55:54 davem Exp $
13 *
14 * Fixes:
15 * Michael Chastain : Incorrect size of copying.
16 * Alan Cox : Added the cache manager code
17 * Alan Cox : Fixed the clone/copy bug and device race.
18 * Mike McLagan : Routing by source
19 * Malcolm Beattie : Buffer handling fixes.
20 * Alexey Kuznetsov : Double buffer free and other fixes.
21 * SVR Anand : Fixed several multicast bugs and problems.
22 * Alexey Kuznetsov : Status, optimisations and more.
23 * Brad Parker : Better behaviour on mrouted upcall
24 * overflow.
25 * Carlos Picoto : PIMv1 Support
26 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
 27 * Relax this requirement to work with older peers.
28 *
29 */
30
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <asm/system.h>
32#include <asm/uaccess.h>
33#include <linux/types.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080034#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/errno.h>
36#include <linux/timer.h>
37#include <linux/mm.h>
38#include <linux/kernel.h>
39#include <linux/fcntl.h>
40#include <linux/stat.h>
41#include <linux/socket.h>
42#include <linux/in.h>
43#include <linux/inet.h>
44#include <linux/netdevice.h>
45#include <linux/inetdevice.h>
46#include <linux/igmp.h>
47#include <linux/proc_fs.h>
48#include <linux/seq_file.h>
49#include <linux/mroute.h>
50#include <linux/init.h>
Kris Katterjohn46f25df2006-01-05 16:35:42 -080051#include <linux/if_ether.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070052#include <net/ip.h>
53#include <net/protocol.h>
54#include <linux/skbuff.h>
Arnaldo Carvalho de Melo14c85022005-12-27 02:43:12 -020055#include <net/route.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070056#include <net/sock.h>
57#include <net/icmp.h>
58#include <net/udp.h>
59#include <net/raw.h>
60#include <linux/notifier.h>
61#include <linux/if_arp.h>
62#include <linux/netfilter_ipv4.h>
63#include <net/ipip.h>
64#include <net/checksum.h>
65
66#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
67#define CONFIG_IP_PIMSM 1
68#endif
69
70static struct sock *mroute_socket;
71
72
73/* Big lock, protecting vif table, mrt cache and mroute socket state.
74 Note that the changes are semaphored via rtnl_lock.
75 */
76
77static DEFINE_RWLOCK(mrt_lock);
78
79/*
80 * Multicast router control variables
81 */
82
83static struct vif_device vif_table[MAXVIFS]; /* Devices */
84static int maxvif;
85
86#define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)
87
88static int mroute_do_assert; /* Set in PIM assert */
89static int mroute_do_pim;
90
91static struct mfc_cache *mfc_cache_array[MFC_LINES]; /* Forwarding cache */
92
93static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */
94static atomic_t cache_resolve_queue_len; /* Size of unresolved */
95
96/* Special spinlock for queue of unresolved entries */
97static DEFINE_SPINLOCK(mfc_unres_lock);
98
99/* We return to original Alan's scheme. Hash table of resolved
100 entries is changed only in process context and protected
101 with weak lock mrt_lock. Queue of unresolved entries is protected
102 with strong spinlock mfc_unres_lock.
103
104 In this case data path is free of exclusive locks at all.
105 */
106
Christoph Lametere18b8902006-12-06 20:33:20 -0800107static struct kmem_cache *mrt_cachep __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108
109static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
110static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
111static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
112
113#ifdef CONFIG_IP_PIMSM_V2
114static struct net_protocol pim_protocol;
115#endif
116
117static struct timer_list ipmr_expire_timer;
118
119/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
120
121static
122struct net_device *ipmr_new_tunnel(struct vifctl *v)
123{
124 struct net_device *dev;
125
126 dev = __dev_get_by_name("tunl0");
127
128 if (dev) {
129 int err;
130 struct ifreq ifr;
131 mm_segment_t oldfs;
132 struct ip_tunnel_parm p;
133 struct in_device *in_dev;
134
135 memset(&p, 0, sizeof(p));
136 p.iph.daddr = v->vifc_rmt_addr.s_addr;
137 p.iph.saddr = v->vifc_lcl_addr.s_addr;
138 p.iph.version = 4;
139 p.iph.ihl = 5;
140 p.iph.protocol = IPPROTO_IPIP;
141 sprintf(p.name, "dvmrp%d", v->vifc_vifi);
142 ifr.ifr_ifru.ifru_data = (void*)&p;
143
144 oldfs = get_fs(); set_fs(KERNEL_DS);
145 err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
146 set_fs(oldfs);
147
148 dev = NULL;
149
150 if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) {
151 dev->flags |= IFF_MULTICAST;
152
Herbert Xue5ed6392005-10-03 14:35:55 -0700153 in_dev = __in_dev_get_rtnl(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154 if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL)
155 goto failure;
156 in_dev->cnf.rp_filter = 0;
157
158 if (dev_open(dev))
159 goto failure;
160 }
161 }
162 return dev;
163
164failure:
165 /* allow the register to be completed before unregistering. */
166 rtnl_unlock();
167 rtnl_lock();
168
169 unregister_netdevice(dev);
170 return NULL;
171}
172
173#ifdef CONFIG_IP_PIMSM
174
175static int reg_vif_num = -1;
176
177static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
178{
179 read_lock(&mrt_lock);
Patrick McHardy2941a482006-01-08 22:05:26 -0800180 ((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len;
181 ((struct net_device_stats*)netdev_priv(dev))->tx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
183 read_unlock(&mrt_lock);
184 kfree_skb(skb);
185 return 0;
186}
187
188static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
189{
Patrick McHardy2941a482006-01-08 22:05:26 -0800190 return (struct net_device_stats*)netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191}
192
193static void reg_vif_setup(struct net_device *dev)
194{
195 dev->type = ARPHRD_PIMREG;
Kris Katterjohn46f25df2006-01-05 16:35:42 -0800196 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197 dev->flags = IFF_NOARP;
198 dev->hard_start_xmit = reg_vif_xmit;
199 dev->get_stats = reg_vif_get_stats;
200 dev->destructor = free_netdev;
201}
202
203static struct net_device *ipmr_reg_vif(void)
204{
205 struct net_device *dev;
206 struct in_device *in_dev;
207
208 dev = alloc_netdev(sizeof(struct net_device_stats), "pimreg",
209 reg_vif_setup);
210
211 if (dev == NULL)
212 return NULL;
213
214 if (register_netdevice(dev)) {
215 free_netdev(dev);
216 return NULL;
217 }
218 dev->iflink = 0;
219
220 if ((in_dev = inetdev_init(dev)) == NULL)
221 goto failure;
222
223 in_dev->cnf.rp_filter = 0;
224
225 if (dev_open(dev))
226 goto failure;
227
228 return dev;
229
230failure:
231 /* allow the register to be completed before unregistering. */
232 rtnl_unlock();
233 rtnl_lock();
234
235 unregister_netdevice(dev);
236 return NULL;
237}
238#endif
239
240/*
241 * Delete a VIF entry
242 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900243
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244static int vif_delete(int vifi)
245{
246 struct vif_device *v;
247 struct net_device *dev;
248 struct in_device *in_dev;
249
250 if (vifi < 0 || vifi >= maxvif)
251 return -EADDRNOTAVAIL;
252
253 v = &vif_table[vifi];
254
255 write_lock_bh(&mrt_lock);
256 dev = v->dev;
257 v->dev = NULL;
258
259 if (!dev) {
260 write_unlock_bh(&mrt_lock);
261 return -EADDRNOTAVAIL;
262 }
263
264#ifdef CONFIG_IP_PIMSM
265 if (vifi == reg_vif_num)
266 reg_vif_num = -1;
267#endif
268
269 if (vifi+1 == maxvif) {
270 int tmp;
271 for (tmp=vifi-1; tmp>=0; tmp--) {
272 if (VIF_EXISTS(tmp))
273 break;
274 }
275 maxvif = tmp+1;
276 }
277
278 write_unlock_bh(&mrt_lock);
279
280 dev_set_allmulti(dev, -1);
281
Herbert Xue5ed6392005-10-03 14:35:55 -0700282 if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283 in_dev->cnf.mc_forwarding--;
284 ip_rt_multicast_event(in_dev);
285 }
286
287 if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
288 unregister_netdevice(dev);
289
290 dev_put(dev);
291 return 0;
292}
293
294/* Destroy an unresolved cache entry, killing queued skbs
295 and reporting error to netlink readers.
296 */
297
298static void ipmr_destroy_unres(struct mfc_cache *c)
299{
300 struct sk_buff *skb;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -0700301 struct nlmsgerr *e;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302
303 atomic_dec(&cache_resolve_queue_len);
304
Stephen Hemminger132adf52007-03-08 20:44:43 -0800305 while ((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700306 if (ip_hdr(skb)->version == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
308 nlh->nlmsg_type = NLMSG_ERROR;
309 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
310 skb_trim(skb, nlh->nlmsg_len);
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -0700311 e = NLMSG_DATA(nlh);
312 e->error = -ETIMEDOUT;
313 memset(&e->msg, 0, sizeof(e->msg));
Thomas Graf2942e902006-08-15 00:30:25 -0700314
315 rtnl_unicast(skb, NETLINK_CB(skb).pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316 } else
317 kfree_skb(skb);
318 }
319
320 kmem_cache_free(mrt_cachep, c);
321}
322
323
324/* Single timer process for all the unresolved queue. */
325
326static void ipmr_expire_process(unsigned long dummy)
327{
328 unsigned long now;
329 unsigned long expires;
330 struct mfc_cache *c, **cp;
331
332 if (!spin_trylock(&mfc_unres_lock)) {
333 mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
334 return;
335 }
336
337 if (atomic_read(&cache_resolve_queue_len) == 0)
338 goto out;
339
340 now = jiffies;
341 expires = 10*HZ;
342 cp = &mfc_unres_queue;
343
344 while ((c=*cp) != NULL) {
345 if (time_after(c->mfc_un.unres.expires, now)) {
346 unsigned long interval = c->mfc_un.unres.expires - now;
347 if (interval < expires)
348 expires = interval;
349 cp = &c->next;
350 continue;
351 }
352
353 *cp = c->next;
354
355 ipmr_destroy_unres(c);
356 }
357
358 if (atomic_read(&cache_resolve_queue_len))
359 mod_timer(&ipmr_expire_timer, jiffies + expires);
360
361out:
362 spin_unlock(&mfc_unres_lock);
363}
364
365/* Fill oifs list. It is called under write locked mrt_lock. */
366
Baruch Evend1b04c02005-07-30 17:41:59 -0700367static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368{
369 int vifi;
370
371 cache->mfc_un.res.minvif = MAXVIFS;
372 cache->mfc_un.res.maxvif = 0;
373 memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
374
375 for (vifi=0; vifi<maxvif; vifi++) {
376 if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
377 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
378 if (cache->mfc_un.res.minvif > vifi)
379 cache->mfc_un.res.minvif = vifi;
380 if (cache->mfc_un.res.maxvif <= vifi)
381 cache->mfc_un.res.maxvif = vifi + 1;
382 }
383 }
384}
385
386static int vif_add(struct vifctl *vifc, int mrtsock)
387{
388 int vifi = vifc->vifc_vifi;
389 struct vif_device *v = &vif_table[vifi];
390 struct net_device *dev;
391 struct in_device *in_dev;
392
393 /* Is vif busy ? */
394 if (VIF_EXISTS(vifi))
395 return -EADDRINUSE;
396
397 switch (vifc->vifc_flags) {
398#ifdef CONFIG_IP_PIMSM
399 case VIFF_REGISTER:
400 /*
401 * Special Purpose VIF in PIM
402 * All the packets will be sent to the daemon
403 */
404 if (reg_vif_num >= 0)
405 return -EADDRINUSE;
406 dev = ipmr_reg_vif();
407 if (!dev)
408 return -ENOBUFS;
409 break;
410#endif
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900411 case VIFF_TUNNEL:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 dev = ipmr_new_tunnel(vifc);
413 if (!dev)
414 return -ENOBUFS;
415 break;
416 case 0:
Stephen Hemminger15333062006-03-20 22:32:28 -0800417 dev = ip_dev_find(vifc->vifc_lcl_addr.s_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 if (!dev)
419 return -EADDRNOTAVAIL;
Stephen Hemminger15333062006-03-20 22:32:28 -0800420 dev_put(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421 break;
422 default:
423 return -EINVAL;
424 }
425
Herbert Xue5ed6392005-10-03 14:35:55 -0700426 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700427 return -EADDRNOTAVAIL;
428 in_dev->cnf.mc_forwarding++;
429 dev_set_allmulti(dev, +1);
430 ip_rt_multicast_event(in_dev);
431
432 /*
433 * Fill in the VIF structures
434 */
435 v->rate_limit=vifc->vifc_rate_limit;
436 v->local=vifc->vifc_lcl_addr.s_addr;
437 v->remote=vifc->vifc_rmt_addr.s_addr;
438 v->flags=vifc->vifc_flags;
439 if (!mrtsock)
440 v->flags |= VIFF_STATIC;
441 v->threshold=vifc->vifc_threshold;
442 v->bytes_in = 0;
443 v->bytes_out = 0;
444 v->pkt_in = 0;
445 v->pkt_out = 0;
446 v->link = dev->ifindex;
447 if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
448 v->link = dev->iflink;
449
450 /* And finish update writing critical data */
451 write_lock_bh(&mrt_lock);
452 dev_hold(dev);
453 v->dev=dev;
454#ifdef CONFIG_IP_PIMSM
455 if (v->flags&VIFF_REGISTER)
456 reg_vif_num = vifi;
457#endif
458 if (vifi+1 > maxvif)
459 maxvif = vifi+1;
460 write_unlock_bh(&mrt_lock);
461 return 0;
462}
463
Al Viro114c7842006-09-27 18:39:29 -0700464static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465{
466 int line=MFC_HASH(mcastgrp,origin);
467 struct mfc_cache *c;
468
469 for (c=mfc_cache_array[line]; c; c = c->next) {
470 if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
471 break;
472 }
473 return c;
474}
475
476/*
477 * Allocate a multicast cache entry
478 */
479static struct mfc_cache *ipmr_cache_alloc(void)
480{
Robert P. J. Dayc3762222007-02-10 01:45:03 -0800481 struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
Stephen Hemminger132adf52007-03-08 20:44:43 -0800482 if (c==NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484 c->mfc_un.res.minvif = MAXVIFS;
485 return c;
486}
487
488static struct mfc_cache *ipmr_cache_alloc_unres(void)
489{
Robert P. J. Dayc3762222007-02-10 01:45:03 -0800490 struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
Stephen Hemminger132adf52007-03-08 20:44:43 -0800491 if (c==NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493 skb_queue_head_init(&c->mfc_un.unres.unresolved);
494 c->mfc_un.unres.expires = jiffies + 10*HZ;
495 return c;
496}
497
498/*
499 * A cache entry has gone into a resolved state from queued
500 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900501
Linus Torvalds1da177e2005-04-16 15:20:36 -0700502static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
503{
504 struct sk_buff *skb;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -0700505 struct nlmsgerr *e;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506
507 /*
508 * Play the pending entries through our router
509 */
510
Stephen Hemminger132adf52007-03-08 20:44:43 -0800511 while ((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700512 if (ip_hdr(skb)->version == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
514
515 if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
516 nlh->nlmsg_len = skb->tail - (u8*)nlh;
517 } else {
518 nlh->nlmsg_type = NLMSG_ERROR;
519 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
520 skb_trim(skb, nlh->nlmsg_len);
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -0700521 e = NLMSG_DATA(nlh);
522 e->error = -EMSGSIZE;
523 memset(&e->msg, 0, sizeof(e->msg));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700524 }
Thomas Graf2942e902006-08-15 00:30:25 -0700525
526 rtnl_unicast(skb, NETLINK_CB(skb).pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527 } else
528 ip_mr_forward(skb, c, 0);
529 }
530}
531
532/*
533 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
534 * expects the following bizarre scheme.
535 *
536 * Called under mrt_lock.
537 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900538
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
540{
541 struct sk_buff *skb;
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -0300542 const int ihl = ip_hdrlen(pkt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700543 struct igmphdr *igmp;
544 struct igmpmsg *msg;
545 int ret;
546
547#ifdef CONFIG_IP_PIMSM
548 if (assert == IGMPMSG_WHOLEPKT)
549 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
550 else
551#endif
552 skb = alloc_skb(128, GFP_ATOMIC);
553
Stephen Hemminger132adf52007-03-08 20:44:43 -0800554 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555 return -ENOBUFS;
556
557#ifdef CONFIG_IP_PIMSM
558 if (assert == IGMPMSG_WHOLEPKT) {
559 /* Ugly, but we have no choice with this interface.
560 Duplicate old header, fix ihl, length etc.
561 And all this only to mangle msg->im_msgtype and
562 to set msg->im_mbz to "mbz" :-)
563 */
Arnaldo Carvalho de Melo878c8142007-03-11 22:38:29 -0300564 skb_push(skb, sizeof(struct iphdr));
565 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -0300566 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo0272ffc2007-03-12 20:05:39 -0300567 msg = (struct igmpmsg *)skb_network_header(skb);
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -0700568 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700569 msg->im_msgtype = IGMPMSG_WHOLEPKT;
570 msg->im_mbz = 0;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900571 msg->im_vif = reg_vif_num;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700572 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
573 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
574 sizeof(struct iphdr));
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900575 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576#endif
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900577 {
578
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579 /*
580 * Copy the IP header
581 */
582
Arnaldo Carvalho de Meloddc7b8e2007-03-15 21:42:27 -0300583 skb_set_network_header(skb, skb->tail - skb->data);
584 skb_put(skb, ihl);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585 memcpy(skb->data,pkt->data,ihl);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700586 ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */
587 msg = (struct igmpmsg *)skb_network_header(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700588 msg->im_vif = vifi;
589 skb->dst = dst_clone(pkt->dst);
590
591 /*
592 * Add our header
593 */
594
595 igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr));
596 igmp->type =
597 msg->im_msgtype = assert;
598 igmp->code = 0;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700599 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700600 skb->transport_header = skb->network_header;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900601 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700602
603 if (mroute_socket == NULL) {
604 kfree_skb(skb);
605 return -EINVAL;
606 }
607
608 /*
609 * Deliver to mrouted
610 */
611 if ((ret=sock_queue_rcv_skb(mroute_socket,skb))<0) {
612 if (net_ratelimit())
613 printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
614 kfree_skb(skb);
615 }
616
617 return ret;
618}
619
620/*
621 * Queue a packet for resolution. It gets locked cache entry!
622 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900623
Linus Torvalds1da177e2005-04-16 15:20:36 -0700624static int
625ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
626{
627 int err;
628 struct mfc_cache *c;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700629 const struct iphdr *iph = ip_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700630
631 spin_lock_bh(&mfc_unres_lock);
632 for (c=mfc_unres_queue; c; c=c->next) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700633 if (c->mfc_mcastgrp == iph->daddr &&
634 c->mfc_origin == iph->saddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635 break;
636 }
637
638 if (c == NULL) {
639 /*
640 * Create a new entry if allowable
641 */
642
643 if (atomic_read(&cache_resolve_queue_len)>=10 ||
644 (c=ipmr_cache_alloc_unres())==NULL) {
645 spin_unlock_bh(&mfc_unres_lock);
646
647 kfree_skb(skb);
648 return -ENOBUFS;
649 }
650
651 /*
652 * Fill in the new cache entry
653 */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700654 c->mfc_parent = -1;
655 c->mfc_origin = iph->saddr;
656 c->mfc_mcastgrp = iph->daddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657
658 /*
659 * Reflect first query at mrouted.
660 */
661 if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900662 /* If the report failed throw the cache entry
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663 out - Brad Parker
664 */
665 spin_unlock_bh(&mfc_unres_lock);
666
667 kmem_cache_free(mrt_cachep, c);
668 kfree_skb(skb);
669 return err;
670 }
671
672 atomic_inc(&cache_resolve_queue_len);
673 c->next = mfc_unres_queue;
674 mfc_unres_queue = c;
675
676 mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
677 }
678
679 /*
680 * See if we can append the packet
681 */
682 if (c->mfc_un.unres.unresolved.qlen>3) {
683 kfree_skb(skb);
684 err = -ENOBUFS;
685 } else {
686 skb_queue_tail(&c->mfc_un.unres.unresolved,skb);
687 err = 0;
688 }
689
690 spin_unlock_bh(&mfc_unres_lock);
691 return err;
692}
693
694/*
695 * MFC cache manipulation by user space mroute daemon
696 */
697
698static int ipmr_mfc_delete(struct mfcctl *mfc)
699{
700 int line;
701 struct mfc_cache *c, **cp;
702
703 line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
704
705 for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
706 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
707 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
708 write_lock_bh(&mrt_lock);
709 *cp = c->next;
710 write_unlock_bh(&mrt_lock);
711
712 kmem_cache_free(mrt_cachep, c);
713 return 0;
714 }
715 }
716 return -ENOENT;
717}
718
719static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
720{
721 int line;
722 struct mfc_cache *uc, *c, **cp;
723
724 line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
725
726 for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
727 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
728 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
729 break;
730 }
731
732 if (c != NULL) {
733 write_lock_bh(&mrt_lock);
734 c->mfc_parent = mfc->mfcc_parent;
Baruch Evend1b04c02005-07-30 17:41:59 -0700735 ipmr_update_thresholds(c, mfc->mfcc_ttls);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736 if (!mrtsock)
737 c->mfc_flags |= MFC_STATIC;
738 write_unlock_bh(&mrt_lock);
739 return 0;
740 }
741
Stephen Hemminger132adf52007-03-08 20:44:43 -0800742 if (!MULTICAST(mfc->mfcc_mcastgrp.s_addr))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743 return -EINVAL;
744
745 c=ipmr_cache_alloc();
746 if (c==NULL)
747 return -ENOMEM;
748
749 c->mfc_origin=mfc->mfcc_origin.s_addr;
750 c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
751 c->mfc_parent=mfc->mfcc_parent;
Baruch Evend1b04c02005-07-30 17:41:59 -0700752 ipmr_update_thresholds(c, mfc->mfcc_ttls);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700753 if (!mrtsock)
754 c->mfc_flags |= MFC_STATIC;
755
756 write_lock_bh(&mrt_lock);
757 c->next = mfc_cache_array[line];
758 mfc_cache_array[line] = c;
759 write_unlock_bh(&mrt_lock);
760
761 /*
762 * Check to see if we resolved a queued list. If so we
763 * need to send on the frames and tidy up.
764 */
765 spin_lock_bh(&mfc_unres_lock);
766 for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
767 cp = &uc->next) {
768 if (uc->mfc_origin == c->mfc_origin &&
769 uc->mfc_mcastgrp == c->mfc_mcastgrp) {
770 *cp = uc->next;
771 if (atomic_dec_and_test(&cache_resolve_queue_len))
772 del_timer(&ipmr_expire_timer);
773 break;
774 }
775 }
776 spin_unlock_bh(&mfc_unres_lock);
777
778 if (uc) {
779 ipmr_cache_resolve(uc, c);
780 kmem_cache_free(mrt_cachep, uc);
781 }
782 return 0;
783}
784
785/*
786 * Close the multicast socket, and clear the vif tables etc
787 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900788
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789static void mroute_clean_tables(struct sock *sk)
790{
791 int i;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900792
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793 /*
794 * Shut down all active vif entries
795 */
Stephen Hemminger132adf52007-03-08 20:44:43 -0800796 for (i=0; i<maxvif; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700797 if (!(vif_table[i].flags&VIFF_STATIC))
798 vif_delete(i);
799 }
800
801 /*
802 * Wipe the cache
803 */
804 for (i=0;i<MFC_LINES;i++) {
805 struct mfc_cache *c, **cp;
806
807 cp = &mfc_cache_array[i];
808 while ((c = *cp) != NULL) {
809 if (c->mfc_flags&MFC_STATIC) {
810 cp = &c->next;
811 continue;
812 }
813 write_lock_bh(&mrt_lock);
814 *cp = c->next;
815 write_unlock_bh(&mrt_lock);
816
817 kmem_cache_free(mrt_cachep, c);
818 }
819 }
820
821 if (atomic_read(&cache_resolve_queue_len) != 0) {
822 struct mfc_cache *c;
823
824 spin_lock_bh(&mfc_unres_lock);
825 while (mfc_unres_queue != NULL) {
826 c = mfc_unres_queue;
827 mfc_unres_queue = c->next;
828 spin_unlock_bh(&mfc_unres_lock);
829
830 ipmr_destroy_unres(c);
831
832 spin_lock_bh(&mfc_unres_lock);
833 }
834 spin_unlock_bh(&mfc_unres_lock);
835 }
836}
837
838static void mrtsock_destruct(struct sock *sk)
839{
840 rtnl_lock();
841 if (sk == mroute_socket) {
842 ipv4_devconf.mc_forwarding--;
843
844 write_lock_bh(&mrt_lock);
845 mroute_socket=NULL;
846 write_unlock_bh(&mrt_lock);
847
848 mroute_clean_tables(sk);
849 }
850 rtnl_unlock();
851}
852
853/*
854 * Socket options and virtual interface manipulation. The whole
855 * virtual interface system is a complete heap, but unfortunately
856 * that's how BSD mrouted happens to think. Maybe one day with a proper
857 * MOSPF/PIM router set up we can clean this up.
858 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900859
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
861{
862 int ret;
863 struct vifctl vif;
864 struct mfcctl mfc;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900865
Stephen Hemminger132adf52007-03-08 20:44:43 -0800866 if (optname != MRT_INIT) {
867 if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700868 return -EACCES;
869 }
870
Stephen Hemminger132adf52007-03-08 20:44:43 -0800871 switch (optname) {
872 case MRT_INIT:
873 if (sk->sk_type != SOCK_RAW ||
874 inet_sk(sk)->num != IPPROTO_IGMP)
875 return -EOPNOTSUPP;
876 if (optlen!=sizeof(int))
877 return -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878
Stephen Hemminger132adf52007-03-08 20:44:43 -0800879 rtnl_lock();
880 if (mroute_socket) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700881 rtnl_unlock();
Stephen Hemminger132adf52007-03-08 20:44:43 -0800882 return -EADDRINUSE;
883 }
884
885 ret = ip_ra_control(sk, 1, mrtsock_destruct);
886 if (ret == 0) {
887 write_lock_bh(&mrt_lock);
888 mroute_socket=sk;
889 write_unlock_bh(&mrt_lock);
890
891 ipv4_devconf.mc_forwarding++;
892 }
893 rtnl_unlock();
894 return ret;
895 case MRT_DONE:
896 if (sk!=mroute_socket)
897 return -EACCES;
898 return ip_ra_control(sk, 0, NULL);
899 case MRT_ADD_VIF:
900 case MRT_DEL_VIF:
901 if (optlen!=sizeof(vif))
902 return -EINVAL;
903 if (copy_from_user(&vif,optval,sizeof(vif)))
904 return -EFAULT;
905 if (vif.vifc_vifi >= MAXVIFS)
906 return -ENFILE;
907 rtnl_lock();
908 if (optname==MRT_ADD_VIF) {
909 ret = vif_add(&vif, sk==mroute_socket);
910 } else {
911 ret = vif_delete(vif.vifc_vifi);
912 }
913 rtnl_unlock();
914 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700915
916 /*
917 * Manipulate the forwarding caches. These live
918 * in a sort of kernel/user symbiosis.
919 */
Stephen Hemminger132adf52007-03-08 20:44:43 -0800920 case MRT_ADD_MFC:
921 case MRT_DEL_MFC:
922 if (optlen!=sizeof(mfc))
923 return -EINVAL;
924 if (copy_from_user(&mfc,optval, sizeof(mfc)))
925 return -EFAULT;
926 rtnl_lock();
927 if (optname==MRT_DEL_MFC)
928 ret = ipmr_mfc_delete(&mfc);
929 else
930 ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
931 rtnl_unlock();
932 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700933 /*
934 * Control PIM assert.
935 */
Stephen Hemminger132adf52007-03-08 20:44:43 -0800936 case MRT_ASSERT:
937 {
938 int v;
939 if (get_user(v,(int __user *)optval))
940 return -EFAULT;
941 mroute_do_assert=(v)?1:0;
942 return 0;
943 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944#ifdef CONFIG_IP_PIMSM
Stephen Hemminger132adf52007-03-08 20:44:43 -0800945 case MRT_PIM:
946 {
947 int v, ret;
948 if (get_user(v,(int __user *)optval))
949 return -EFAULT;
950 v = (v)?1:0;
951 rtnl_lock();
952 ret = 0;
953 if (v != mroute_do_pim) {
954 mroute_do_pim = v;
955 mroute_do_assert = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956#ifdef CONFIG_IP_PIMSM_V2
Stephen Hemminger132adf52007-03-08 20:44:43 -0800957 if (mroute_do_pim)
958 ret = inet_add_protocol(&pim_protocol,
959 IPPROTO_PIM);
960 else
961 ret = inet_del_protocol(&pim_protocol,
962 IPPROTO_PIM);
963 if (ret < 0)
964 ret = -EAGAIN;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966 }
Stephen Hemminger132adf52007-03-08 20:44:43 -0800967 rtnl_unlock();
968 return ret;
969 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700970#endif
Stephen Hemminger132adf52007-03-08 20:44:43 -0800971 /*
972 * Spurious command, or MRT_VERSION which you cannot
973 * set.
974 */
975 default:
976 return -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700977 }
978}
979
980/*
981 * Getsock opt support for the multicast routing system.
982 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900983
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
985{
986 int olr;
987 int val;
988
Stephen Hemminger132adf52007-03-08 20:44:43 -0800989 if (optname!=MRT_VERSION &&
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990#ifdef CONFIG_IP_PIMSM
991 optname!=MRT_PIM &&
992#endif
993 optname!=MRT_ASSERT)
994 return -ENOPROTOOPT;
995
996 if (get_user(olr, optlen))
997 return -EFAULT;
998
999 olr = min_t(unsigned int, olr, sizeof(int));
1000 if (olr < 0)
1001 return -EINVAL;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001002
Stephen Hemminger132adf52007-03-08 20:44:43 -08001003 if (put_user(olr,optlen))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004 return -EFAULT;
Stephen Hemminger132adf52007-03-08 20:44:43 -08001005 if (optname==MRT_VERSION)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006 val=0x0305;
1007#ifdef CONFIG_IP_PIMSM
Stephen Hemminger132adf52007-03-08 20:44:43 -08001008 else if (optname==MRT_PIM)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009 val=mroute_do_pim;
1010#endif
1011 else
1012 val=mroute_do_assert;
Stephen Hemminger132adf52007-03-08 20:44:43 -08001013 if (copy_to_user(optval,&val,olr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014 return -EFAULT;
1015 return 0;
1016}
1017
1018/*
1019 * The IP multicast ioctl support routines.
1020 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001021
/*
 * ioctl handler for the multicast-routing socket.
 *
 * SIOCGETVIFCNT  - per-vif packet/byte counters for a given vif index.
 * SIOCGETSGCNT   - per-(S,G) forwarding statistics from the MFC cache.
 *
 * Both requests copy a request struct in from user space, fill it under
 * mrt_lock, drop the lock, and copy the result back out.  Note that the
 * lock is always released before copy_to_user(), which may sleep.
 */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr,arg,sizeof(vr)))
			return -EFAULT;
		/* Index must lie within the currently allocated vif range. */
		if (vr.vifi>=maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif=&vif_table[vr.vifi];
		if (VIF_EXISTS(vr.vifi)) {
			/* Snapshot the counters while the vif cannot go away. */
			vr.icount=vif->pkt_in;
			vr.ocount=vif->pkt_out;
			vr.ibytes=vif->bytes_in;
			vr.obytes=vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg,&vr,sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr,arg,sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		/* Look up the (source, group) forwarding cache entry. */
		c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg,&sr,sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
1072
1073
1074static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1075{
1076 struct vif_device *v;
1077 int ct;
1078 if (event != NETDEV_UNREGISTER)
1079 return NOTIFY_DONE;
1080 v=&vif_table[0];
Stephen Hemminger132adf52007-03-08 20:44:43 -08001081 for (ct=0;ct<maxvif;ct++,v++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082 if (v->dev==ptr)
1083 vif_delete(ct);
1084 }
1085 return NOTIFY_DONE;
1086}
1087
1088
/* Notifier block hooking ipmr_device_event() into netdevice events. */
static struct notifier_block ip_mr_notifier={
	.notifier_call = ipmr_device_event,
};
1092
1093/*
1094 * Encapsulate a packet by attaching a valid IPIP header to it.
1095 * This avoids tunnel drivers and other mess and gives us the speed so
1096 * important for multicast video.
1097 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001098
static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	struct iphdr *old_iph = ip_hdr(skb);

	/* Prepend a fresh outer header; the caller guaranteed headroom
	 * via skb_cow().  The old network header becomes the transport
	 * (inner) header of the IPIP packet.
	 */
	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 	4;
	/* Inherit TOS and TTL from the encapsulated packet. */
	iph->tos	=	old_iph->tos;
	iph->ttl	=	old_iph->ttl;
	iph->frag_off	=	0;
	iph->daddr	=	daddr;
	iph->saddr	=	saddr;
	iph->protocol	=	IPPROTO_IPIP;
	iph->ihl	=	5;	/* no options in the outer header */
	iph->tot_len	=	htons(skb->len);
	ip_select_ident(iph, skb->dst, NULL);
	ip_send_check(iph);

	/* Outer packet starts clean: no inherited IP options or conntrack. */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}
1124
1125static inline int ipmr_forward_finish(struct sk_buff *skb)
1126{
1127 struct ip_options * opt = &(IPCB(skb)->opt);
1128
1129 IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
1130
1131 if (unlikely(opt->optlen))
1132 ip_forward_options(skb);
1133
1134 return dst_output(skb);
1135}
1136
1137/*
1138 * Processing handlers for ipmr_forward
1139 */
1140
/*
 * Transmit one copy of a multicast packet on virtual interface @vifi.
 * Consumes @skb in all cases (either queued to the stack or freed).
 */
static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	int    encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	/* Register vif: do not transmit; report the whole packet to the
	 * user-space PIM daemon instead.
	 */
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out+=skb->len;
		((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len;
		((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++;
		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
		kfree_skb(skb);
		return;
	}
#endif

	/* Resolve the output route: to the tunnel endpoint for tunnel
	 * vifs (reserving room for the outer IPIP header), or towards the
	 * group address for plain interfaces.
	 */
	if (vif->flags&VIFF_TUNNEL) {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = vif->remote,
						.saddr = vif->local,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&rt, &fl))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&rt, &fl))
			goto out_free;
	}

	dev = rt->u.dst.dev;

	if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		   allow to send ICMP, so that packets will disappear
		   to blackhole.
		 */

		IP_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	/* Headroom needed: link-layer header plus any dst header space. */
	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out+=skb->len;

	/* Swap in the new route; the skb now owns the rt reference. */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++;
		((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
	return;
}
1241
1242static int ipmr_find_vif(struct net_device *dev)
1243{
1244 int ct;
1245 for (ct=maxvif-1; ct>=0; ct--) {
1246 if (vif_table[ct].dev == dev)
1247 break;
1248 }
1249 return ct;
1250}
1251
1252/* "local" means that we should preserve one skb (for local delivery) */
1253
/* "local" means that we should preserve one skb (for local delivery) */

/*
 * Replicate @skb onto every output vif listed in @cache whose TTL
 * threshold the packet passes.  With @local set the original skb is
 * kept for the caller; otherwise it is consumed here (sent on the last
 * vif or freed).  Called with mrt_lock held for reading.
 */
static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (vif_table[vif].dev != skb->dev) {
		int true_vifi;

		if (((struct rtable*)skb->dst)->fl.iif == 0) {
			/* It is our own packet, looped back.
			   Very complicated situation...

			   The best workaround until routing daemons will be
			   fixed is not to redistribute packet, if it was
			   send through wrong interface. It means, that
			   multicast applications WILL NOT work for
			   (S,G), which have default multicast route pointing
			   to wrong oif. In any case, it is not a good
			   idea to use multicasting applications on router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(skb->dev);

		if (true_vifi >= 0 && mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			/* Rate-limited WRONGVIF upcall to the routing daemon. */
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

	vif_table[vif].pkt_in++;
	vif_table[vif].bytes_in+=skb->len;

	/*
	 * Forward the frame
	 */
	/* Clone for every vif except the last; psend trails one step behind
	 * so the final transmission can consume the original skb when the
	 * caller does not need it back (local == 0).
	 */
	for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ipmr_queue_xmit(skb2, cache, psend);
			}
			psend=ct;
		}
	}
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2)
				ipmr_queue_xmit(skb2, cache, psend);
		} else {
			ipmr_queue_xmit(skb, cache, psend);
			return 0;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
	return 0;
}
1334
1335
1336/*
1337 * Multicast packets for forwarding arrive here
1338 */
1339
/*
 * Entry point for multicast packets needing forwarding.  Looks up the
 * (S,G) cache entry, queues unresolved packets for the daemon, and
 * hands locally-destined copies to ip_local_deliver().
 */
int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	int local = ((struct rtable*)skb->dst)->rt_flags&RTCF_LOCAL;

	/* Packet is looped back after forward, it should not be
	   forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags&IPSKB_FORWARDED)
		goto dont_forward;

	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			/* Router-alert packets go to the RA chain first. */
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){
			/* IGMPv1 (and broken IGMPv2 implementations sort of
			   Cisco IOS <= 11.2(8)) do not put router alert
			   option to IGMP packets destined to routable
			   groups. It is very bad, because it means
			   that we can forward NO IGMP messages.
			 */
			read_lock(&mrt_lock);
			if (mroute_socket) {
				nf_reset(skb);
				/* Deliver raw IGMP to the mrouted socket. */
				raw_rcv(mroute_socket, skb);
				read_unlock(&mrt_lock);
				return 0;
			}
			read_unlock(&mrt_lock);
		}
	}

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 * No usable cache entry
	 */
	if (cache==NULL) {
		int vif;

		if (local) {
			/* Keep a clone for the unresolved queue; the
			 * original goes to local delivery now.
			 */
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL) {
				read_unlock(&mrt_lock);
				return -ENOBUFS;
			}
			skb = skb2;
		}

		vif = ipmr_find_vif(skb->dev);
		if (vif >= 0) {
			/* Park the packet until the daemon resolves (S,G). */
			int err = ipmr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip_mr_forward(skb, cache, local);

	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}
1419
1420#ifdef CONFIG_IP_PIMSM_V1
1421/*
1422 * Handle IGMP messages of PIMv1
1423 */
1424
/*
 * Receive a PIMv1 REGISTER carried in an IGMP message: validate it,
 * strip the outer headers and re-inject the encapsulated multicast
 * packet through the register pseudo-device.  Always consumes the skb.
 */
int pim_rcv_v1(struct sk_buff * skb)
{
	struct igmphdr *pim;
	struct iphdr   *encap;
	struct net_device  *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = igmp_hdr(skb);

	if (!mroute_do_pim ||
	    skb->len < sizeof(*pim) + sizeof(*encap) ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	/* Inner IP header sits right behind the IGMP/PIMv1 header. */
	encap = (struct iphdr *)(skb_transport_header(skb) +
				 sizeof(struct igmphdr));
	/*
	   Check that:
	   a. packet is really destined to a multicast group
	   b. packet is not a NULL-REGISTER
	   c. packet is not truncated
	 */
	if (!MULTICAST(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
		goto drop;

	/* Grab a reference on the register device under mrt_lock. */
	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	/* Peel off the outer encapsulation and re-point the headers at
	 * the inner packet before re-injecting it.
	 */
	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8*)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	skb->dst = NULL;
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
1483#endif
1484
1485#ifdef CONFIG_IP_PIMSM_V2
/*
 * Receive a PIMv2 REGISTER: verify type, flags and checksum (per the
 * relaxed rule, either the PIM-header-only checksum or the full-packet
 * checksum may be valid), then unwrap and re-inject the inner packet
 * through the register pseudo-device.  Always consumes the skb.
 */
static int pim_rcv(struct sk_buff * skb)
{
	struct pimreghdr *pim;
	struct iphdr   *encap;
	struct net_device  *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
	    (pim->flags&PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct iphdr *)(skb_transport_header(skb) +
				 sizeof(struct pimreghdr));
	if (!MULTICAST(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
		goto drop;

	/* Grab a reference on the register device under mrt_lock. */
	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	/* Strip the outer encapsulation and re-inject the inner packet. */
	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8*)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
	skb->dst = NULL;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
1539#endif
1540
/*
 * Append the multicast route described by @c to a netlink message:
 * an RTA_IIF attribute for the input interface plus an RTA_MULTIPATH
 * attribute with one rtnexthop per active output vif.
 * Returns 1 on success, -EMSGSIZE (after trimming back to the original
 * tail) when the skb runs out of room.
 */
static int
ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct net_device *dev = vif_table[c->mfc_parent].dev;
	u8 *b = skb->tail;	/* remember tail so we can undo on failure */
	struct rtattr *mp_head;

	if (dev)
		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

	/* Reserve the multipath header; its length is patched below. */
	mp_head = (struct rtattr*)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		/* TTL 255 marks a vif not in this route's output set. */
		if (c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb->tail - (u8*)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -EMSGSIZE;
}
1575
/*
 * Fill a netlink route dump for a multicast destination.  If no cache
 * entry exists yet (and @nowait is clear), fabricate a minimal IP
 * header on a clone and queue it as an unresolved request so the
 * daemon can install the route.
 */
int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc_cache *cache;
	struct rtable *rt = (struct rtable*)skb->dst;

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);

	if (cache==NULL) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}
		/* Work on a clone; the caller keeps the original skb. */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		/* Build a fake IP header carrying just the (S,G) pair;
		 * version 0 flags it as synthetic for the upcall path.
		 */
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = rt->rt_src;
		iph->daddr = rt->rt_dst;
		iph->version = 0;
		err = ipmr_cache_unresolved(vif, skb2);
		read_unlock(&mrt_lock);
		return err;
	}

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = ipmr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
1625
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001626#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627/*
1628 * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
1629 */
/* seq_file cursor over vif_table: current index only. */
struct ipmr_vif_iter {
	int ct;
};
1633
1634static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
1635 loff_t pos)
1636{
1637 for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
Stephen Hemminger132adf52007-03-08 20:44:43 -08001638 if (!VIF_EXISTS(iter->ct))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 continue;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001640 if (pos-- == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 return &vif_table[iter->ct];
1642 }
1643 return NULL;
1644}
1645
/* Begin a vif dump: take mrt_lock (released in ipmr_vif_seq_stop) and
 * seek to *pos; position 0 yields the header token.
 */
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
1652
/* Advance to the next existing vif; NULL ends the dump. */
static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;

	++*pos;
	/* After the header token, start from the first real entry. */
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(iter, 0);

	while (++iter->ct < maxvif) {
		if (!VIF_EXISTS(iter->ct))
			continue;
		return &vif_table[iter->ct];
	}
	return NULL;
}
1668
/* End of dump: drop the lock taken in ipmr_vif_seq_start(). */
static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&mrt_lock);
}
1673
/* Emit one /proc/net/ip_mr_vif line: header for the start token,
 * otherwise the vif's index, device name, counters and addresses.
 */
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - vif_table,	/* index via pointer arithmetic */
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
1692
/* seq_file operations for /proc/net/ip_mr_vif. */
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};
1699
1700static int ipmr_vif_open(struct inode *inode, struct file *file)
1701{
1702 struct seq_file *seq;
1703 int rc = -ENOMEM;
1704 struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001705
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 if (!s)
1707 goto out;
1708
1709 rc = seq_open(file, &ipmr_vif_seq_ops);
1710 if (rc)
1711 goto out_kfree;
1712
1713 s->ct = 0;
1714 seq = file->private_data;
1715 seq->private = s;
1716out:
1717 return rc;
1718out_kfree:
1719 kfree(s);
1720 goto out;
1721
1722}
1723
/* file_operations for /proc/net/ip_mr_vif; iterator freed by
 * seq_release_private. */
static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};
1731
/* seq_file cursor over the MFC: which table we are in (resolved
 * mfc_cache_array or the unresolved queue) and the hash bucket index.
 */
struct ipmr_mfc_iter {
	struct mfc_cache **cache;
	int ct;
};
1736
1737
/*
 * Seek to the @pos'th MFC entry: first through the resolved hash
 * buckets, then the unresolved queue.  NOTE: on a non-NULL return this
 * leaves the matching lock (mrt_lock or mfc_unres_lock) HELD; it is
 * released by ipmr_mfc_seq_stop()/seq_next().  it->cache records which
 * domain the cursor is in.
 */
static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc_cache *mfc;

	it->cache = mfc_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
		for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;
	read_unlock(&mrt_lock);

	/* Resolved entries exhausted: continue into the unresolved queue. */
	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
1760
1761
/* Begin an MFC dump; locking is done inside ipmr_mfc_seq_idx(). */
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	it->cache = NULL;
	it->ct = 0;
	return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
1770
/*
 * Advance the MFC cursor.  Handles the hand-over from the resolved
 * hash table (under mrt_lock) to the unresolved queue (under
 * mfc_unres_lock): mrt_lock is dropped and mfc_unres_lock taken when
 * the array is exhausted.  Whatever lock is held on return is released
 * by ipmr_mfc_seq_stop().
 */
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(seq->private, 0);

	/* More entries in the current chain? */
	if (mfc->next)
		return mfc->next;

	if (it->cache == &mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != mfc_cache_array);

	/* Walk the remaining hash buckets of the resolved table. */
	while (++it->ct < MFC_LINES) {
		mfc = mfc_cache_array[it->ct];
		if (mfc)
			return mfc;
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;
	if (mfc)
		return mfc;

 end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
1811
/* Release whichever lock the cursor still holds (see seq_idx/next). */
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mfc_cache_array)
		read_unlock(&mrt_lock);
}
1821
/* Emit one /proc/net/ip_mr_cache line: group, origin, input vif and
 * counters; for resolved entries also the per-oif TTL thresholds.
 */
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
			   (unsigned long) mfc->mfc_mcastgrp,
			   (unsigned long) mfc->mfc_origin,
			   mfc->mfc_parent,
			   mfc->mfc_un.res.pkt,
			   mfc->mfc_un.res.bytes,
			   mfc->mfc_un.res.wrong_if);

		/* Unresolved entries have no output vif list to print. */
		if (it->cache != &mfc_unres_queue) {
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++ ) {
				if (VIF_EXISTS(n)
				   && mfc->mfc_un.res.ttls[n] < 255)
				seq_printf(seq,
					   " %2d:%-3d",
					   n, mfc->mfc_un.res.ttls[n]);
			}
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
1855
/* seq_file operations for /proc/net/ip_mr_cache. */
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
1862
1863static int ipmr_mfc_open(struct inode *inode, struct file *file)
1864{
1865 struct seq_file *seq;
1866 int rc = -ENOMEM;
1867 struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001868
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 if (!s)
1870 goto out;
1871
1872 rc = seq_open(file, &ipmr_mfc_seq_ops);
1873 if (rc)
1874 goto out_kfree;
1875
1876 seq = file->private_data;
1877 seq->private = s;
1878out:
1879 return rc;
1880out_kfree:
1881 kfree(s);
1882 goto out;
1883
1884}
1885
/* file_operations for /proc/net/ip_mr_cache; iterator freed by
 * seq_release_private. */
static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001893#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894
1895#ifdef CONFIG_IP_PIMSM_V2
/* IPPROTO_PIM input handler, registered when PIM-SM v2 is enabled. */
static struct net_protocol pim_protocol = {
	.handler	=	pim_rcv,
};
1899#endif
1900
1901
1902/*
1903 * Setup for IP multicast routing
1904 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001905
/*
 * Boot-time initialization: create the MFC slab cache (SLAB_PANIC —
 * failure here is fatal), arm the unresolved-entry expiry timer, hook
 * netdevice events, and register the /proc interfaces.
 */
void __init ip_mr_init(void)
{
	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
				       NULL, NULL);
	init_timer(&ipmr_expire_timer);
	ipmr_expire_timer.function=ipmr_expire_process;
	register_netdevice_notifier(&ip_mr_notifier);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops);
	proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops);
#endif
}