/*
 *              Simple traffic shaper for Linux NET3.
 *
 *      (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
 *                              http://www.redhat.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
 *      warranty for any of this software. This material is provided
 *      "AS-IS" and at no charge.
 *
 *
 *      Algorithm:
 *
 *      Queue Frame:
 *              Compute time length of frame at regulated speed
 *              Add frame to queue at appropriate point
 *              Adjust time length computation for follow-up frames
 *              Any frame that falls outside of its boundaries is freed
 *
 *      We work to the following constants
 *
 *              SHAPER_QLEN     Maximum queued frames
 *              SHAPER_LATENCY  Bounding latency on a frame. Leaving this latency
 *                              window drops the frame. This stops us queueing
 *                              frames for a long time and confusing a remote
 *                              host.
 *              SHAPER_MAXSLIP  Maximum time a priority frame may jump forward.
 *                              That bounds the penalty we will inflict on low
 *                              priority traffic.
 *              SHAPER_BURST    Time range we call "now" in order to reduce
 *                              system load. The more we make this the burstier
 *                              the behaviour, the better local performance you
 *                              get through packet clustering on routers and the
 *                              worse the remote end gets to judge rtts.
 *
 *      This is designed to handle lower speed links (< 200K/second or so). We
 *      run off a 100-150Hz base clock typically. This gives us a resolution at
 *      200Kbit/second of about 2Kbit or 256 bytes. Above that our timer
 *      resolution may start to cause much more burstiness in the traffic. We
 *      could avoid a lot of that by calling kick_shaper() at the end of the
 *      tied device transmissions. If you run above about 100K/second you
 *      may need to tune the supposed speed rate for the right values.
 *
 *      BUGS:
 *              Downing the interface under the shaper before the shaper
 *              will render your machine defunct. So don't shape over
 *              PPP or SLIP for now!
 *              This will be fixed in BETA4
 *
 *      Update History :
 *
 *              Fixed bh_atomic() SMP races and rewrote the locking code to
 *              be SMP safe and irq-mask friendly.
 *              NOTE: we can't use start_bh_atomic() in kick_shaper()
 *              because it's going to be recalled from an irq handler,
 *              and synchronize_bh() is a no-no if called from irq context.
 *                                              1999  Andrea Arcangeli
 *
 *              Device statistics (tx_packets, tx_bytes,
 *              tx_drops: queue_over_time and collisions: max_queue_exceeded)
 *                              1999/06/18 Jordi Murgo <savage@apostols.org>
 *
 *              Use skb->cb for private data.
 *                               2000/03 Andi Kleen
 */
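
/*
 * Worked example of the resolution figures above (illustrative numbers, not
 * defined by this driver): at a regulated rate of 200 Kbit/s with a HZ=100
 * clock, one tick is worth 200000/100 = 2000 bits, i.e. roughly 2 Kbit or
 * 250 bytes of transmit credit, so scheduling any finer than that is lost
 * in the timer granularity and shows up as burstiness on the wire.
 */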

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/if_shaper.h>
#include <linux/jiffies.h>

#include <net/dst.h>
#include <net/arp.h>
#include <net/net_namespace.h>

struct shaper_cb {
        unsigned long   shapeclock;     /* Time it should go out */
        unsigned long   shapestamp;     /* Stamp for shaper */
        __u32           shapelatency;   /* Latency on frame */
        __u32           shapelen;       /* Frame length in clocks */
        __u16           shapepend;      /* Pending */
};
#define SHAPERCB(skb) ((struct shaper_cb *) ((skb)->cb))

static int sh_debug;            /* Debug flag */

#define SHAPER_BANNER   "CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"

static void shaper_kick(struct shaper *sh);

/*
 *      Compute clocks on a buffer
 */

static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
{
        int t=skb->len/shaper->bytespertick;
        return t;
}

/*
 *      Set the speed of a shaper. We compute this in bytes per tick since
 *      that's how the machine wants to run. Quoted input is in bits per second
 *      as is traditional (note not BAUD). We assume 8-bit bytes.
 */

static void shaper_setspeed(struct shaper *shaper, int bitspersec)
{
        shaper->bitspersec=bitspersec;
        shaper->bytespertick=(bitspersec/HZ)/8;
        if(!shaper->bytespertick)
                shaper->bytespertick++;
}
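
/*
 * For example (assumed numbers, for illustration only): with HZ=100, a
 * requested rate of 256000 bits/sec gives bytespertick = (256000/100)/8 =
 * 320 bytes per tick, and shaper_clocks() then charges a 1500-byte frame
 * 1500/320 = 4 ticks of transmit time.
 */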

/*
 *      Throw a frame at a shaper.
 */

static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct shaper *shaper = dev->priv;
        struct sk_buff *ptr;

        spin_lock(&shaper->lock);
        ptr=shaper->sendq.prev;

        /*
         *      Set up our packet details
         */

        SHAPERCB(skb)->shapelatency=0;
        SHAPERCB(skb)->shapeclock=shaper->recovery;
        if(time_before(SHAPERCB(skb)->shapeclock, jiffies))
                SHAPERCB(skb)->shapeclock=jiffies;
        skb->priority=0;        /* short term bug fix */
        SHAPERCB(skb)->shapestamp=jiffies;

        /*
         *      Time slots for this packet.
         */

        SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);

        {
                struct sk_buff *tmp;
                /*
                 *      Up our shape clock by the time pending on the queue
                 *      (Should keep this in the shaper as a variable..)
                 */
                for(tmp=skb_peek(&shaper->sendq); tmp!=NULL &&
                    tmp!=(struct sk_buff *)&shaper->sendq; tmp=tmp->next)
                        SHAPERCB(skb)->shapeclock+=SHAPERCB(tmp)->shapelen;
                /*
                 *      Queue over time. Spill packet.
                 */
                if(time_after(SHAPERCB(skb)->shapeclock,jiffies + SHAPER_LATENCY)) {
                        dev_kfree_skb(skb);
                        dev->stats.tx_dropped++;
                } else
                        skb_queue_tail(&shaper->sendq, skb);
        }

        if(sh_debug)
                printk("Frame queued.\n");
        if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
        {
                ptr=skb_dequeue(&shaper->sendq);
                dev_kfree_skb(ptr);
                dev->stats.collisions++;
        }
        shaper_kick(shaper);
        spin_unlock(&shaper->lock);
        return 0;
}
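
/*
 * Sketch of the accounting above with assumed numbers (not taken from the
 * driver): if bytespertick is 320, a 1500-byte frame costs 4 ticks. Its
 * shapeclock starts at max(recovery, jiffies), is pushed later by the
 * shapelen of every frame already sitting on sendq, and if that lands more
 * than SHAPER_LATENCY jiffies in the future the frame is dropped rather
 * than queued.
 */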

/*
 *      Transmit from a shaper
 */

static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
{
        struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC);
        if(sh_debug)
                printk("Kick frame on %p\n",newskb);
        if(newskb)
        {
                newskb->dev=shaper->dev;
                newskb->priority=2;
                if(sh_debug)
                        printk("Kick new frame to %s, %d\n",
                                shaper->dev->name,newskb->priority);
                dev_queue_xmit(newskb);

                shaper->dev->stats.tx_bytes += skb->len;
                shaper->dev->stats.tx_packets++;

                if(sh_debug)
                        printk("Kicked new frame out.\n");
                dev_kfree_skb(skb);
        }
}

/*
 *      Timer handler for shaping clock
 */

static void shaper_timer(unsigned long data)
{
        struct shaper *shaper = (struct shaper *)data;

        spin_lock(&shaper->lock);
        shaper_kick(shaper);
        spin_unlock(&shaper->lock);
}

/*
 *      Kick a shaper queue and try to do something sensible with the
 *      queue.
 */

static void shaper_kick(struct shaper *shaper)
{
        struct sk_buff *skb;

        /*
         *      Walk the list (may be empty)
         */

        while((skb=skb_peek(&shaper->sendq))!=NULL)
        {
                /*
                 *      Each packet due to go out by now (within an error
                 *      of SHAPER_BURST) gets kicked onto the link
                 */

                if(sh_debug)
                        printk("Clock = %ld, jiffies = %ld\n", SHAPERCB(skb)->shapeclock, jiffies);
                if(time_before_eq(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_BURST))
                {
                        /*
                         *      Pull the frame and get interrupts back on.
                         */

                        skb_unlink(skb, &shaper->sendq);
                        if (shaper->recovery <
                            SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
                                shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
                        /*
                         *      Pass on to the physical target device via
                         *      our low level packet thrower.
                         */

                        SHAPERCB(skb)->shapepend=0;
                        shaper_queue_xmit(shaper, skb); /* Fire */
                }
                else
                        break;
        }

        /*
         *      Next kick.
         */

        if(skb!=NULL)
                mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);
}

/*
 *      Bring the interface up. We just disallow this until a
 *      bind.
 */

static int shaper_open(struct net_device *dev)
{
        struct shaper *shaper=dev->priv;

        /*
         *      Can't open until attached.
         *      Also can't open until speed is set, or we'll get
         *      a division by zero.
         */

        if(shaper->dev==NULL)
                return -ENODEV;
        if(shaper->bitspersec==0)
                return -EINVAL;
        return 0;
}

/*
 *      Closing a shaper flushes the queues.
 */

static int shaper_close(struct net_device *dev)
{
        struct shaper *shaper=dev->priv;
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
                dev_kfree_skb(skb);

        spin_lock_bh(&shaper->lock);
        shaper_kick(shaper);
        spin_unlock_bh(&shaper->lock);

        del_timer_sync(&shaper->timer);
        return 0;
}

/*
 *      Revectored calls. We alter the parameters and call the functions
 *      for our attached device. This enables us to allocate bandwidth after
 *      ARP and other resolutions and not before.
 */

static int shaper_header(struct sk_buff *skb, struct net_device *dev,
                         unsigned short type,
                         const void *daddr, const void *saddr, unsigned len)
{
        struct shaper *sh=dev->priv;
        int v;
        if(sh_debug)
                printk("Shaper header\n");
        skb->dev = sh->dev;
        v = dev_hard_header(skb, sh->dev, type, daddr, saddr, len);
        skb->dev = dev;
        return v;
}

static int shaper_rebuild_header(struct sk_buff *skb)
{
        struct shaper *sh=skb->dev->priv;
        struct net_device *dev=skb->dev;
        int v;
        if(sh_debug)
                printk("Shaper rebuild header\n");
        skb->dev=sh->dev;
        v = sh->dev->header_ops->rebuild(skb);
        skb->dev=dev;
        return v;
}

#if 0
static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
{
        struct shaper *sh=neigh->dev->priv;
        struct net_device *tmp;
        int ret;
        if(sh_debug)
                printk("Shaper header cache bind\n");
        tmp=neigh->dev;
        neigh->dev=sh->dev;
        ret=sh->hard_header_cache(neigh,hh);
        neigh->dev=tmp;
        return ret;
}

static void shaper_cache_update(struct hh_cache *hh, struct net_device *dev,
                                unsigned char *haddr)
{
        struct shaper *sh=dev->priv;
        if(sh_debug)
                printk("Shaper cache update\n");
        sh->header_cache_update(hh, sh->dev, haddr);
}
#endif

#ifdef CONFIG_INET

static int shaper_neigh_setup(struct neighbour *n)
{
#ifdef CONFIG_INET
        if (n->nud_state == NUD_NONE) {
                n->ops = &arp_broken_ops;
                n->output = n->ops->output;
        }
#endif
        return 0;
}

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
#ifdef CONFIG_INET
        if (p->tbl->family == AF_INET) {
                p->neigh_setup = shaper_neigh_setup;
                p->ucast_probes = 0;
                p->mcast_probes = 0;
        }
#endif
        return 0;
}

#else /* !(CONFIG_INET) */

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
        return 0;
}

#endif

static const struct header_ops shaper_ops = {
        .create = shaper_header,
        .rebuild = shaper_rebuild_header,
};

static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev)
{
        sh->dev = dev;
        sh->get_stats=dev->get_stats;

        shdev->neigh_setup = shaper_neigh_setup_dev;
        shdev->hard_header_len=dev->hard_header_len;
        shdev->type=dev->type;
        shdev->addr_len=dev->addr_len;
        shdev->mtu=dev->mtu;
        sh->bitspersec=0;
        return 0;
}

static int shaper_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct shaperconf *ss= (struct shaperconf *)&ifr->ifr_ifru;
        struct shaper *sh=dev->priv;

        if(ss->ss_cmd == SHAPER_SET_DEV || ss->ss_cmd == SHAPER_SET_SPEED)
        {
                if(!capable(CAP_NET_ADMIN))
                        return -EPERM;
        }

        switch(ss->ss_cmd)
        {
                case SHAPER_SET_DEV:
                {
                        struct net_device *them=__dev_get_by_name(&init_net, ss->ss_name);
                        if(them==NULL)
                                return -ENODEV;
                        if(sh->dev)
                                return -EBUSY;
                        return shaper_attach(dev,dev->priv, them);
                }
                case SHAPER_GET_DEV:
                        if(sh->dev==NULL)
                                return -ENODEV;
                        strcpy(ss->ss_name, sh->dev->name);
                        return 0;
                case SHAPER_SET_SPEED:
                        shaper_setspeed(sh,ss->ss_speed);
                        return 0;
                case SHAPER_GET_SPEED:
                        ss->ss_speed=sh->bitspersec;
                        return 0;
                default:
                        return -EINVAL;
        }
}
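
/*
 * Userspace reaches this handler through a private device ioctl on the
 * shaper interface. A minimal sketch of what a configuration tool might do,
 * assuming the request is issued as SIOCDEVPRIVATE (the convention the old
 * shapecfg utility followed); "shaper0", "eth0" and the rate are example
 * values only:
 *
 *      struct ifreq ifr;
 *      struct shaperconf *ss = (struct shaperconf *)&ifr.ifr_ifru;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      strcpy(ifr.ifr_name, "shaper0");
 *      ss->ss_cmd = SHAPER_SET_DEV;
 *      strcpy(ss->ss_name, "eth0");
 *      ioctl(fd, SIOCDEVPRIVATE, &ifr);
 *
 *      ss->ss_cmd = SHAPER_SET_SPEED;
 *      ss->ss_speed = 64000;
 *      ioctl(fd, SIOCDEVPRIVATE, &ifr);
 */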

static void shaper_init_priv(struct net_device *dev)
{
        struct shaper *sh = dev->priv;

        skb_queue_head_init(&sh->sendq);
        init_timer(&sh->timer);
        sh->timer.function=shaper_timer;
        sh->timer.data=(unsigned long)sh;
        spin_lock_init(&sh->lock);
}

/*
 *      Add a shaper device to the system
 */

static void __init shaper_setup(struct net_device *dev)
{
        /*
         *      Set up the shaper.
         */

        shaper_init_priv(dev);

        dev->open               = shaper_open;
        dev->stop               = shaper_close;
        dev->hard_start_xmit    = shaper_start_xmit;
        dev->set_multicast_list = NULL;

        /*
         *      Initialise the packet queues
         */

        /*
         *      Handlers for when we attach to a device.
         */

        dev->neigh_setup        = shaper_neigh_setup_dev;
        dev->do_ioctl           = shaper_ioctl;
        dev->hard_header_len    = 0;
        dev->type               = ARPHRD_ETHER; /* initially */
        dev->set_mac_address    = NULL;
        dev->mtu                = 1500;
        dev->addr_len           = 0;
        dev->tx_queue_len       = 10;
        dev->flags              = 0;
}

static int shapers = 1;
#ifdef MODULE

module_param(shapers, int, 0);
MODULE_PARM_DESC(shapers, "Traffic shaper: maximum number of shapers");

#else /* MODULE */

static int __init set_num_shapers(char *str)
{
        shapers = simple_strtol(str, NULL, 0);
        return 1;
}

__setup("shapers=", set_num_shapers);

#endif /* MODULE */
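
/*
 * Typical configuration sketch (interface names and the rate are examples,
 * assuming the historical shapecfg tool is used to drive the ioctls above):
 * load the module with the number of shaper devices wanted, attach one to a
 * real interface, give it a speed, then bring it up like any other device:
 *
 *      modprobe shaper shapers=1
 *      shapecfg attach shaper0 eth0
 *      shapecfg speed shaper0 64000
 *      ifconfig shaper0 <addr> netmask <mask> up
 */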

static struct net_device **devs;

static unsigned int shapers_registered = 0;

static int __init shaper_init(void)
{
        int i;
        size_t alloc_size;
        struct net_device *dev;
        char name[IFNAMSIZ];

        if (shapers < 1)
                return -ENODEV;

        alloc_size = sizeof(*dev) * shapers;
        devs = kzalloc(alloc_size, GFP_KERNEL);
        if (!devs)
                return -ENOMEM;

        for (i = 0; i < shapers; i++) {

                snprintf(name, IFNAMSIZ, "shaper%d", i);
                dev = alloc_netdev(sizeof(struct shaper), name,
                                   shaper_setup);
                if (!dev)
                        break;

                if (register_netdev(dev)) {
                        free_netdev(dev);
                        break;
                }

                devs[i] = dev;
                shapers_registered++;
        }

        if (!shapers_registered) {
                kfree(devs);
                devs = NULL;
        }

        return (shapers_registered ? 0 : -ENODEV);
}

static void __exit shaper_exit(void)
{
        int i;

        for (i = 0; i < shapers_registered; i++) {
                if (devs[i]) {
                        unregister_netdev(devs[i]);
                        free_netdev(devs[i]);
                }
        }

        kfree(devs);
        devs = NULL;
}

module_init(shaper_init);
module_exit(shaper_exit);
MODULE_LICENSE("GPL");