blob: 20edeb3457921e5377c68a6e7d69681682fca135 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Simple traffic shaper for Linux NET3.
3 *
4 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
5 * http://www.redhat.com
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
13 * warranty for any of this software. This material is provided
14 * "AS-IS" and at no charge.
15 *
16 *
17 * Algorithm:
18 *
19 * Queue Frame:
20 * Compute time length of frame at regulated speed
21 * Add frame to queue at appropriate point
22 * Adjust time length computation for followup frames
23 * Any frame that falls outside of its boundaries is freed
24 *
25 * We work to the following constants
26 *
27 * SHAPER_QLEN Maximum queued frames
28 * SHAPER_LATENCY Bounding latency on a frame. Leaving this latency
29 * window drops the frame. This stops us queueing
30 * frames for a long time and confusing a remote
31 * host.
32 * SHAPER_MAXSLIP Maximum time a priority frame may jump forward.
33 * That bounds the penalty we will inflict on low
34 * priority traffic.
35 * SHAPER_BURST Time range we call "now" in order to reduce
36 * system load. The more we make this the burstier
37 * the behaviour, the better local performance you
38 * get through packet clustering on routers and the
39 * worse the remote end gets to judge rtts.
40 *
41 * This is designed to handle lower speed links ( < 200K/second or so). We
42 * run off a 100-150Hz base clock typically. This gives us a resolution at
43 * 200Kbit/second of about 2Kbit or 256 bytes. Above that our timer
44 * resolution may start to cause much more burstiness in the traffic. We
45 * could avoid a lot of that by calling kick_shaper() at the end of the
46 * tied device transmissions. If you run above about 100K second you
47 * may need to tune the supposed speed rate for the right values.
48 *
49 * BUGS:
50 * Downing the interface under the shaper before the shaper
51 * will render your machine defunct. Don't for now shape over
52 * PPP or SLIP therefore!
53 * This will be fixed in BETA4
54 *
55 * Update History :
56 *
57 * bh_atomic() SMP races fixes and rewritten the locking code to
58 * be SMP safe and irq-mask friendly.
59 * NOTE: we can't use start_bh_atomic() in kick_shaper()
60 * because it's going to be recalled from an irq handler,
61 * and synchronize_bh() is a nono if called from irq context.
62 * 1999 Andrea Arcangeli
63 *
 *		Device statistics (tx_packets, tx_bytes,
 *		tx_drops: queue_over_time and collisions: max_queue_exceeded)
66 * 1999/06/18 Jordi Murgo <savage@apostols.org>
67 *
68 * Use skb->cb for private data.
69 * 2000/03 Andi Kleen
70 */
71
72#include <linux/config.h>
73#include <linux/module.h>
74#include <linux/kernel.h>
75#include <linux/fcntl.h>
76#include <linux/mm.h>
77#include <linux/slab.h>
78#include <linux/string.h>
79#include <linux/errno.h>
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/skbuff.h>
83#include <linux/if_arp.h>
84#include <linux/init.h>
85#include <linux/if_shaper.h>
86
87#include <net/dst.h>
88#include <net/arp.h>
89
/*
 * Per-packet shaper state, carried in the skb control buffer (skb->cb).
 * Filled in by shaper_start_xmit() and consumed by shaper_kick().
 */
struct shaper_cb {
	unsigned long	shapeclock;	/* Time it should go out */
	unsigned long	shapestamp;	/* Stamp for shaper */
	__u32		shapelatency;	/* Latency on frame */
	__u32		shapelen;	/* Frame length in clocks */
	__u16		shapepend;	/* Pending */
};
/* Accessor for the shaper-private data stored in an skb's cb[] */
#define SHAPERCB(skb) ((struct shaper_cb *) ((skb)->cb))
98
static int sh_debug;		/* Debug flag */

#define SHAPER_BANNER	"CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"

/* Forward declaration: drains due frames; callers hold shaper->sem */
static void shaper_kick(struct shaper *sh);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105/*
106 * Compute clocks on a buffer
107 */
108
109static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
110{
111 int t=skb->len/shaper->bytespertick;
112 return t;
113}
114
115/*
116 * Set the speed of a shaper. We compute this in bytes per tick since
117 * thats how the machine wants to run. Quoted input is in bits per second
118 * as is traditional (note not BAUD). We assume 8 bit bytes.
119 */
120
121static void shaper_setspeed(struct shaper *shaper, int bitspersec)
122{
123 shaper->bitspersec=bitspersec;
124 shaper->bytespertick=(bitspersec/HZ)/8;
125 if(!shaper->bytespertick)
126 shaper->bytespertick++;
127}
128
129/*
130 * Throw a frame at a shaper.
131 */
132
Christoph Hellwigb597ef42005-06-02 16:36:00 -0700133
/*
 * Queue a frame on the shaper.  Computes the frame's transmit time in
 * shaper clocks, stamps that into skb->cb, appends it to the send
 * queue, drops it if it would exceed SHAPER_LATENCY, trims the queue
 * to SHAPER_QLEN, and finally kicks the queue.  Serialised against
 * the timer and flush paths by shaper->sem; returns -1 (retry) when
 * the semaphore is contended.
 */
static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct shaper *shaper = dev->priv;
	struct sk_buff *ptr;

	/* Non-blocking: we may be called from softirq context */
	if (down_trylock(&shaper->sem))
		return -1;

	ptr=shaper->sendq.prev;

	/*
	 *	Set up our packet details
	 */

	SHAPERCB(skb)->shapelatency=0;
	/* Earliest departure: after the last frame already sent (recovery),
	   but never in the past */
	SHAPERCB(skb)->shapeclock=shaper->recovery;
	if(time_before(SHAPERCB(skb)->shapeclock, jiffies))
		SHAPERCB(skb)->shapeclock=jiffies;
	skb->priority=0;	/* short term bug fix */
	SHAPERCB(skb)->shapestamp=jiffies;

	/*
	 *	Time slots for this packet.
	 */

	SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);

#ifdef SHAPER_COMPLEX /* and broken.. */
	/* Priority insertion: walk backwards from the tail, slipping
	   lower-priority frames later by our length (disabled, broken) */
	while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
	{
		if(ptr->pri<skb->pri
			&& jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
		{
			struct sk_buff *tmp=ptr->prev;

			/*
			 *	It goes before us therefore we slip the length
			 *	of the new frame.
			 */

			SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen;
			SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen;

			/*
			 *	The packet may have slipped so far back it
			 *	fell off.
			 */
			if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
			{
				skb_unlink(ptr);
				dev_kfree_skb(ptr);
			}
			ptr=tmp;
		}
		else
			break;
	}
	if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
		skb_queue_head(&shaper->sendq,skb);
	else
	{
		struct sk_buff *tmp;
		/*
		 *	Set the packet clock out time according to the
		 *	frames ahead. Im sure a bit of thought could drop
		 *	this loop.
		 */
		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
			SHAPERCB(skb)->shapeclock+=tmp->shapelen;
		skb_append(ptr,skb);
	}
#else
	{
		struct sk_buff *tmp;
		/*
		 *	Up our shape clock by the time pending on the queue
		 *	(Should keep this in the shaper as a variable..)
		 */
		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL &&
		    tmp!=(struct sk_buff *)&shaper->sendq; tmp=tmp->next)
			SHAPERCB(skb)->shapeclock+=SHAPERCB(tmp)->shapelen;
		/*
		 *	Queue over time. Spill packet.
		 */
		if(SHAPERCB(skb)->shapeclock-jiffies > SHAPER_LATENCY) {
			dev_kfree_skb(skb);
			shaper->stats.tx_dropped++;
		} else
			skb_queue_tail(&shaper->sendq, skb);
	}
#endif
	if(sh_debug)
		printk("Frame queued.\n");
	/* Hard cap on queue depth: spill the head and count a collision */
	if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
	{
		ptr=skb_dequeue(&shaper->sendq);
		dev_kfree_skb(ptr);
		shaper->stats.collisions++;
	}
	shaper_kick(shaper);
	up(&shaper->sem);
	return 0;
}
238
239/*
240 * Transmit from a shaper
241 */
242
243static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
244{
245 struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC);
246 if(sh_debug)
247 printk("Kick frame on %p\n",newskb);
248 if(newskb)
249 {
250 newskb->dev=shaper->dev;
251 newskb->priority=2;
252 if(sh_debug)
253 printk("Kick new frame to %s, %d\n",
254 shaper->dev->name,newskb->priority);
255 dev_queue_xmit(newskb);
256
257 shaper->stats.tx_bytes += skb->len;
258 shaper->stats.tx_packets++;
259
260 if(sh_debug)
261 printk("Kicked new frame out.\n");
262 dev_kfree_skb(skb);
263 }
264}
265
266/*
267 * Timer handler for shaping clock
268 */
269
270static void shaper_timer(unsigned long data)
271{
Christoph Hellwigb597ef42005-06-02 16:36:00 -0700272 struct shaper *shaper = (struct shaper *)data;
273
274 if (!down_trylock(&shaper->sem)) {
275 shaper_kick(shaper);
276 up(&shaper->sem);
277 } else
278 mod_timer(&shaper->timer, jiffies);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700279}
280
281/*
282 * Kick a shaper queue and try and do something sensible with the
283 * queue.
284 */
285
/*
 * Kick the shaper queue: transmit every frame whose departure time has
 * arrived (within SHAPER_BURST), advance the recovery clock past each
 * one, and rearm the timer for the first frame still pending.
 * Callers hold shaper->sem.
 */
static void shaper_kick(struct shaper *shaper)
{
	struct sk_buff *skb;

	/*
	 *	Walk the list (may be empty)
	 */

	while((skb=skb_peek(&shaper->sendq))!=NULL)
	{
		/*
		 *	Each packet due to go out by now (within an error
		 *	of SHAPER_BURST) gets kicked onto the link
		 */

		if(sh_debug)
			printk("Clock = %ld, jiffies = %ld\n", SHAPERCB(skb)->shapeclock, jiffies);
		if(time_before_eq(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_BURST))
		{
			/*
			 *	Pull the frame and get interrupts back on.
			 */

			skb_unlink(skb);
			/* Recovery marks when the link is next free: never
			   let it move backwards */
			if (shaper->recovery <
			    SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
				shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
			/*
			 *	Pass on to the physical target device via
			 *	our low level packet thrower.
			 */

			SHAPERCB(skb)->shapepend=0;
			shaper_queue_xmit(shaper, skb);	/* Fire */
		}
		else
			break;
	}

	/*
	 *	Next kick.
	 */

	if(skb!=NULL)
		mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);
}
332
333
334/*
335 * Flush the shaper queues on a closedown
336 */
337
338static void shaper_flush(struct shaper *shaper)
339{
340 struct sk_buff *skb;
Christoph Hellwigb597ef42005-06-02 16:36:00 -0700341
342 down(&shaper->sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343 while((skb=skb_dequeue(&shaper->sendq))!=NULL)
344 dev_kfree_skb(skb);
Christoph Hellwigb597ef42005-06-02 16:36:00 -0700345 shaper_kick(shaper);
346 up(&shaper->sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347}
348
349/*
350 * Bring the interface up. We just disallow this until a
351 * bind.
352 */
353
354static int shaper_open(struct net_device *dev)
355{
356 struct shaper *shaper=dev->priv;
357
358 /*
359 * Can't open until attached.
360 * Also can't open until speed is set, or we'll get
361 * a division by zero.
362 */
363
364 if(shaper->dev==NULL)
365 return -ENODEV;
366 if(shaper->bitspersec==0)
367 return -EINVAL;
368 return 0;
369}
370
371/*
372 * Closing a shaper flushes the queues.
373 */
374
375static int shaper_close(struct net_device *dev)
376{
377 struct shaper *shaper=dev->priv;
378 shaper_flush(shaper);
379 del_timer_sync(&shaper->timer);
380 return 0;
381}
382
383/*
384 * Revectored calls. We alter the parameters and call the functions
385 * for our attached device. This enables us to bandwidth allocate after
386 * ARP and other resolutions and not before.
387 */
388
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389static struct net_device_stats *shaper_get_stats(struct net_device *dev)
390{
391 struct shaper *sh=dev->priv;
392 return &sh->stats;
393}
394
395static int shaper_header(struct sk_buff *skb, struct net_device *dev,
396 unsigned short type, void *daddr, void *saddr, unsigned len)
397{
398 struct shaper *sh=dev->priv;
399 int v;
400 if(sh_debug)
401 printk("Shaper header\n");
402 skb->dev=sh->dev;
403 v=sh->hard_header(skb,sh->dev,type,daddr,saddr,len);
404 skb->dev=dev;
405 return v;
406}
407
408static int shaper_rebuild_header(struct sk_buff *skb)
409{
410 struct shaper *sh=skb->dev->priv;
411 struct net_device *dev=skb->dev;
412 int v;
413 if(sh_debug)
414 printk("Shaper rebuild header\n");
415 skb->dev=sh->dev;
416 v=sh->rebuild_header(skb);
417 skb->dev=dev;
418 return v;
419}
420
#if 0
/*
 * Header-cache revectoring through the slave device.  Disabled (see
 * the matching #else in shaper_attach); kept for reference only —
 * this code is not compiled.
 */
static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
{
	struct shaper *sh=neigh->dev->priv;
	struct net_device *tmp;
	int ret;
	if(sh_debug)
		printk("Shaper header cache bind\n");
	tmp=neigh->dev;
	neigh->dev=sh->dev;
	ret=sh->hard_header_cache(neigh,hh);
	neigh->dev=tmp;
	return ret;
}

static void shaper_cache_update(struct hh_cache *hh, struct net_device *dev,
	unsigned char *haddr)
{
	struct shaper *sh=dev->priv;
	if(sh_debug)
		printk("Shaper cache update\n");
	sh->header_cache_update(hh, sh->dev, haddr);
}
#endif
445
#ifdef CONFIG_INET

/*
 * Neighbour setup for ARP entries resolving over the shaper: route
 * unresolved entries through arp_broken_ops, with probing disabled
 * below in shaper_neigh_setup_dev().
 *
 * Fix: the bodies of both functions repeated an #ifdef CONFIG_INET
 * guard although this whole section is already inside one — the
 * redundant inner guards are dropped.
 */
static int shaper_neigh_setup(struct neighbour *n)
{
	if (n->nud_state == NUD_NONE) {
		n->ops = &arp_broken_ops;
		n->output = n->ops->output;
	}
	return 0;
}

/* Install shaper_neigh_setup and turn off probes for IPv4 neighbours */
static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
	if (p->tbl->family == AF_INET) {
		p->neigh_setup = shaper_neigh_setup;
		p->ucast_probes = 0;
		p->mcast_probes = 0;
	}
	return 0;
}

#else /* !(CONFIG_INET) */

/* No INET: nothing to set up */
static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
	return 0;
}

#endif
479
/*
 * Bind the shaper device shdev to a real device dev.  Saves the
 * slave's transmit/header operations in sh so the revectored handlers
 * can call them, mirrors the slave's link-level parameters onto the
 * shaper device, and clears the speed so the shaper cannot be opened
 * until one is set.
 */
static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev)
{
	sh->dev = dev;
	sh->hard_start_xmit=dev->hard_start_xmit;
	sh->get_stats=dev->get_stats;
	/* Only revector header building if the slave actually has it */
	if(dev->hard_header)
	{
		sh->hard_header=dev->hard_header;
		shdev->hard_header = shaper_header;
	}
	else
		shdev->hard_header = NULL;

	if(dev->rebuild_header)
	{
		sh->rebuild_header	= dev->rebuild_header;
		shdev->rebuild_header	= shaper_rebuild_header;
	}
	else
		shdev->rebuild_header	= NULL;

#if 0
	if(dev->hard_header_cache)
	{
		sh->hard_header_cache=	dev->hard_header_cache;
		shdev->hard_header_cache= shaper_cache;
	}
	else
	{
		shdev->hard_header_cache= NULL;
	}

	if(dev->header_cache_update)
	{
		sh->header_cache_update = dev->header_cache_update;
		shdev->header_cache_update = shaper_cache_update;
	}
	else
		shdev->header_cache_update= NULL;
#else
	/* Header caching through the shaper is disabled */
	shdev->header_cache_update = NULL;
	shdev->hard_header_cache = NULL;
#endif
	shdev->neigh_setup = shaper_neigh_setup_dev;

	/* Mirror the slave's link parameters */
	shdev->hard_header_len=dev->hard_header_len;
	shdev->type=dev->type;
	shdev->addr_len=dev->addr_len;
	shdev->mtu=dev->mtu;
	sh->bitspersec=0;	/* must be set before open */
	return 0;
}
532
533static int shaper_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
534{
535 struct shaperconf *ss= (struct shaperconf *)&ifr->ifr_ifru;
536 struct shaper *sh=dev->priv;
537
538 if(ss->ss_cmd == SHAPER_SET_DEV || ss->ss_cmd == SHAPER_SET_SPEED)
539 {
540 if(!capable(CAP_NET_ADMIN))
541 return -EPERM;
542 }
543
544 switch(ss->ss_cmd)
545 {
546 case SHAPER_SET_DEV:
547 {
548 struct net_device *them=__dev_get_by_name(ss->ss_name);
549 if(them==NULL)
550 return -ENODEV;
551 if(sh->dev)
552 return -EBUSY;
553 return shaper_attach(dev,dev->priv, them);
554 }
555 case SHAPER_GET_DEV:
556 if(sh->dev==NULL)
557 return -ENODEV;
558 strcpy(ss->ss_name, sh->dev->name);
559 return 0;
560 case SHAPER_SET_SPEED:
561 shaper_setspeed(sh,ss->ss_speed);
562 return 0;
563 case SHAPER_GET_SPEED:
564 ss->ss_speed=sh->bitspersec;
565 return 0;
566 default:
567 return -EINVAL;
568 }
569}
570
571static void shaper_init_priv(struct net_device *dev)
572{
573 struct shaper *sh = dev->priv;
574
575 skb_queue_head_init(&sh->sendq);
576 init_timer(&sh->timer);
577 sh->timer.function=shaper_timer;
578 sh->timer.data=(unsigned long)sh;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579}
580
581/*
582 * Add a shaper device to the system
583 */
584
585static void __init shaper_setup(struct net_device *dev)
586{
587 /*
588 * Set up the shaper.
589 */
590
591 SET_MODULE_OWNER(dev);
592
593 shaper_init_priv(dev);
594
595 dev->open = shaper_open;
596 dev->stop = shaper_close;
597 dev->hard_start_xmit = shaper_start_xmit;
598 dev->get_stats = shaper_get_stats;
599 dev->set_multicast_list = NULL;
600
601 /*
602 * Intialise the packet queues
603 */
604
605 /*
606 * Handlers for when we attach to a device.
607 */
608
609 dev->hard_header = shaper_header;
610 dev->rebuild_header = shaper_rebuild_header;
611#if 0
612 dev->hard_header_cache = shaper_cache;
613 dev->header_cache_update= shaper_cache_update;
614#endif
615 dev->neigh_setup = shaper_neigh_setup_dev;
616 dev->do_ioctl = shaper_ioctl;
617 dev->hard_header_len = 0;
618 dev->type = ARPHRD_ETHER; /* initially */
619 dev->set_mac_address = NULL;
620 dev->mtu = 1500;
621 dev->addr_len = 0;
622 dev->tx_queue_len = 10;
623 dev->flags = 0;
624}
625
static int shapers = 1;		/* how many shaper devices to create */
#ifdef MODULE

module_param(shapers, int, 0);
MODULE_PARM_DESC(shapers, "Traffic shaper: maximum number of shapers");

#else /* MODULE */

/* Built-in: parse "shapers=N" from the kernel command line */
static int __init set_num_shapers(char *str)
{
	shapers = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("shapers=", set_num_shapers);

#endif /* MODULE */
643
/* Array of registered shaper net_devices; freed and NULLed on exit */
static struct net_device **devs;

/* Count of successfully registered entries in devs[] */
static unsigned int shapers_registered = 0;
647
648static int __init shaper_init(void)
649{
650 int i;
651 size_t alloc_size;
652 struct net_device *dev;
653 char name[IFNAMSIZ];
654
655 if (shapers < 1)
656 return -ENODEV;
657
658 alloc_size = sizeof(*dev) * shapers;
659 devs = kmalloc(alloc_size, GFP_KERNEL);
660 if (!devs)
661 return -ENOMEM;
662 memset(devs, 0, alloc_size);
663
664 for (i = 0; i < shapers; i++) {
665
666 snprintf(name, IFNAMSIZ, "shaper%d", i);
667 dev = alloc_netdev(sizeof(struct shaper), name,
668 shaper_setup);
669 if (!dev)
670 break;
671
672 if (register_netdev(dev)) {
673 free_netdev(dev);
674 break;
675 }
676
677 devs[i] = dev;
678 shapers_registered++;
679 }
680
681 if (!shapers_registered) {
682 kfree(devs);
683 devs = NULL;
684 }
685
686 return (shapers_registered ? 0 : -ENODEV);
687}
688
689static void __exit shaper_exit (void)
690{
691 int i;
692
693 for (i = 0; i < shapers_registered; i++) {
694 if (devs[i]) {
695 unregister_netdev(devs[i]);
696 free_netdev(devs[i]);
697 }
698 }
699
700 kfree(devs);
701 devs = NULL;
702}
703
/* Module entry/exit points and license */
module_init(shaper_init);
module_exit(shaper_exit);
MODULE_LICENSE("GPL");
707