blob: a3953bbe2d79844c612f72a9a16df594d30c267e [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * net/sched/sch_cbq.c Class-Based Queueing discipline.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 */
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/types.h>
15#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/string.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/errno.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/skbuff.h>
Patrick McHardy0ba48052007-07-02 22:49:07 -070019#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <net/pkt_sched.h>
21
22
23/* Class-Based Queueing (CBQ) algorithm.
24 =======================================
25
26 Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +090027 Management Models for Packet Networks",
Linus Torvalds1da177e2005-04-16 15:20:36 -070028 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995
29
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +090030 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +090032 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
Linus Torvalds1da177e2005-04-16 15:20:36 -070033 Parameters", 1996
34
35 [4] Sally Floyd and Michael Speer, "Experimental Results
36 for Class-Based Queueing", 1998, not published.
37
38 -----------------------------------------------------------------------
39
40 Algorithm skeleton was taken from NS simulator cbq.cc.
41 If someone wants to check this code against the LBL version,
42 he should take into account that ONLY the skeleton was borrowed,
43 the implementation is different. Particularly:
44
45 --- The WRR algorithm is different. Our version looks more
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +090046 reasonable (I hope) and works when quanta are allowed to be
47 less than MTU, which is always the case when real time classes
48 have small rates. Note, that the statement of [3] is
49 incomplete, delay may actually be estimated even if class
50 per-round allotment is less than MTU. Namely, if per-round
51 allotment is W*r_i, and r_1+...+r_k = r < 1
Linus Torvalds1da177e2005-04-16 15:20:36 -070052
53 delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B
54
55 In the worst case we have IntServ estimate with D = W*r+k*MTU
56 and C = MTU*r. The proof (if correct at all) is trivial.
57
58
59 --- It seems that cbq-2.0 is not very accurate. At least, I cannot
60 interpret some places, which look like wrong translations
61 from NS. Anyone is advised to find these differences
62 and explain to me, why I am wrong 8).
63
64 --- Linux has no EOI event, so that we cannot estimate true class
65 idle time. Workaround is to consider the next dequeue event
66 as sign that previous packet is finished. This is wrong because of
67 internal device queueing, but on a permanently loaded link it is true.
68 Moreover, combined with clock integrator, this scheme looks
69 very close to an ideal solution. */
70
71struct cbq_sched_data;
72
73
struct cbq_class
{
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */
	unsigned char		ovl_strategy;	/* presumably selects which cbq_ovl_* handler
						   is installed in ->overlimit — set outside
						   this chunk, confirm against cbq_set_overlimit */
#ifdef CONFIG_NET_CLS_ACT
	unsigned char		police;
#endif

	u32			defmap;		/* bitmap of priorities this class is default for */

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* Overlimit strategy parameters */
	void			(*overlimit)(struct cbq_class *cl);	/* action run when overlimit */
	psched_tdiff_t		penalty;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;	/* nonzero while the class is put to sleep by
						   an overlimit action (see cbq_ovl_*) */
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;	/* time when class goes underlimit again;
						   PSCHED_PASTPERFECT = unregulated */
	long			avgidle;	/* scaled EWMA of idle time (see cbq_update) */
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;	/* when the OVL_DELAY/LOWPRIO penalty expires */
	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto	*filter_list;	/* classifiers attached to this class */

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX+1];	/* per-priority default classes */
};
143
struct cbq_sched_data
{
	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO+1];
	unsigned		quanta[TC_CBQ_MAXPRIO+1];

	struct cbq_class	link;			/* built-in root ("link") class;
							   classification starts here */

	unsigned		activemask;		/* bitmap: bands with backlogged classes */
	struct cbq_class	*active[TC_CBQ_MAXPRIO+1];	/* List of all classes
							   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;		/* class of skb being enqueued; consumed
							   by cbq_reshape_fail() */
#endif
	struct cbq_class	*tx_class;		/* class of the last dequeued skb */
	struct cbq_class	*tx_borrowed;		/* class it borrowed bandwidth from */
	int			tx_len;			/* length of the last dequeued skb */
	psched_time_t		now;			/* Cached timestamp */
	psched_time_t		now_rt;			/* Cached real time */
	unsigned		pmask;			/* bitmap: bands holding penalized classes */

	struct hrtimer		delay_timer;		/* fires cbq_undelay() when a penalty expires */
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit just now */
	psched_tdiff_t		wd_expires;		/* relative watchdog expiry, set in dequeue path */
	int			toplevel;		/* top-level link-sharing cutoff level */
	u32			hgenerator;		/* NOTE(review): id generator, used outside
							   this chunk — confirm */
};
175
176
Jesper Dangaard Brouere9bef552007-09-12 16:35:24 +0200177#define L2T(cl,len) qdisc_l2t((cl)->R_tab,len)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179static __inline__ struct cbq_class *
180cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
181{
Patrick McHardyd77fea22008-07-05 23:22:05 -0700182 struct Qdisc_class_common *clc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183
Patrick McHardyd77fea22008-07-05 23:22:05 -0700184 clc = qdisc_class_find(&q->clhash, classid);
185 if (clc == NULL)
186 return NULL;
187 return container_of(clc, struct cbq_class, common);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188}
189
Patrick McHardyc3bc7cf2007-07-15 00:03:05 -0700190#ifdef CONFIG_NET_CLS_ACT
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191
192static struct cbq_class *
193cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
194{
195 struct cbq_class *cl, *new;
196
197 for (cl = this->tparent; cl; cl = cl->tparent)
198 if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this)
199 return new;
200
201 return NULL;
202}
203
204#endif
205
206/* Classify packet. The procedure is pretty complicated, but
207 it allows us to combine link sharing and priority scheduling
208 transparently.
209
210 Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
211 so that it resolves to split nodes. Then packets are classified
212 by logical priority, or a more specific classifier may be attached
213 to the split node.
214 */
215
/* Map @skb to a leaf cbq_class.  On a NULL return, *qerr tells the
 * caller whether to count a drop (NET_XMIT_BYPASS) or report success
 * (packet consumed by an action).  May return a non-leaf class as a
 * last resort when classification fails entirely.
 */
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_result res;

	/*
	 *  Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio^sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		/*
		 * Step 2+n. Apply classifier.
		 */
		if (!head->filter_list ||
		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
			goto fallback;

		if ((cl = (void*)res.class) == NULL) {
			/* Classifier gave no class pointer: resolve via
			 * classid, else via the per-priority default map. */
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL || cl->level >= head->level)
				goto fallback;
		}

#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
			/* fall through: packet was consumed, report no class */
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link sharing class,
		 *	   apply agency specific classifier.
		 *	   Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio&TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}
290
291/*
292 A packet has just been enqueued on the empty class.
293 cbq_activate_class adds it to the tail of active class list
294 of its priority band.
295 */
296
297static __inline__ void cbq_activate_class(struct cbq_class *cl)
298{
299 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
300 int prio = cl->cpriority;
301 struct cbq_class *cl_tail;
302
303 cl_tail = q->active[prio];
304 q->active[prio] = cl;
305
306 if (cl_tail != NULL) {
307 cl->next_alive = cl_tail->next_alive;
308 cl_tail->next_alive = cl;
309 } else {
310 cl->next_alive = cl;
311 q->activemask |= (1<<prio);
312 }
313}
314
315/*
316 Unlink class from active chain.
317 Note that this same procedure is done directly in cbq_dequeue*
318 during round-robin procedure.
319 */
320
/* Remove @this from its priority band's circular active ring,
 * clearing the band's activemask bit when the ring becomes empty.
 * (cbq_dequeue_prio performs the same surgery inline.)
 */
static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			/* Unlink from the ring. */
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				/* It was the ring tail: predecessor becomes tail. */
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					/* It was the only member: band goes idle. */
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}
346
/* Lower the top-level link-sharing cutoff to the level of the first
 * class on @cl's borrow chain that is underlimit "now".  The current
 * time is the integrated clock q->now advanced by the real-time delta
 * since it was last cached.  Skipped while @cl's child qdisc is
 * throttled.
 */
static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) {
		psched_time_t now;
		psched_tdiff_t incr;

		now = psched_get_time();
		incr = now - q->now_rt;
		now = q->now + incr;

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl=cl->borrow) != NULL && toplevel > cl->level);
	}
}
368
/* Qdisc enqueue entry point: classify @skb into a CBQ class and hand
 * it to that class's elementary qdisc.  Returns the child qdisc's
 * verdict; accounts a drop when classification fails or the child
 * rejects the packet.
 */
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int len = skb->len;
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;	/* remembered for cbq_reshape_fail() */
#endif
	if (cl == NULL) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}

#ifdef CONFIG_NET_CLS_ACT
	cl->q->__parent = sch;
#endif
	if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		sch->bstats.packets++;
		sch->bstats.bytes+=len;
		cbq_mark_toplevel(q, cl);
		/* A class that just went from empty to backlogged must be
		 * linked into its band's active ring. */
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	/* Child qdisc refused the packet. */
	sch->qstats.drops++;
	cbq_mark_toplevel(q, cl);
	cl->qstats.drops++;
	return ret;
}
405
/* Give back a packet the device could not take: requeue it on the
 * class it was last dequeued from (q->tx_class).  Returns the child
 * qdisc's verdict, or NET_XMIT_CN when no transmitting class is
 * recorded (the packet is then dropped).
 */
static int
cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int ret;

	if ((cl = q->tx_class) == NULL) {
		kfree_skb(skb);
		sch->qstats.drops++;
		return NET_XMIT_CN;
	}
	q->tx_class = NULL;	/* the in-flight packet is accounted for */

	cbq_mark_toplevel(q, cl);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
	cl->q->__parent = sch;
#endif
	if ((ret = cl->q->ops->requeue(skb, cl->q)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return 0;
	}
	sch->qstats.drops++;
	cl->qstats.drops++;
	return ret;
}
437
438/* Overlimit actions */
439
440/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */
441
/* Default overlimit action: compute how long the class must stay
 * silent (offtime plus the unscaled negative avgidle debt), push its
 * undertime forward, and shrink the qdisc watchdog expiry q->wd_expires
 * so dequeue is retried in time.
 */
static void cbq_ovl_classic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		   Class goes to sleep, so that it will have no
		   chance to work avgidle. Let's forgive it 8)

		   BTW cbq-2.0 has a crap in this
		   place, apparently they forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	   real available rate, rather than leaf rate,
	   which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		/* Wake up at the earliest undertime along the borrow chain. */
		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}
491
492/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
493 they go overlimit
494 */
495
/* TC_CBQ_OVL_RCLASSIC handler: intended to penalize a class in the
 * hierarchy rather than only the leaf, then apply the classic action.
 */
static void cbq_ovl_rclassic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this = cl;

	/* NOTE(review): both exits of this loop leave cl == NULL (the
	 * break nulls it explicitly, and the while-condition is false
	 * only when cl is NULL), so the fallback below always fires and
	 * the classic penalty is always applied to the original class.
	 * Confirm this matches the intended RCLASSIC semantics.
	 */
	do {
		if (cl->level > q->toplevel) {
			cl = NULL;
			break;
		}
	} while ((cl = cl->borrow) != NULL);

	if (cl == NULL)
		cl = this;
	cbq_ovl_classic(cl);
}
512
513/* TC_CBQ_OVL_DELAY: delay until it will go to underlimit */
514
/* TC_CBQ_OVL_DELAY handler: suspend the class until it goes
 * underlimit.  The class is parked in the special TC_CBQ_MAXPRIO
 * band and the delay hrtimer is (re)armed so cbq_undelay() releases
 * it once the penalty interval has elapsed.
 */
static void cbq_ovl_delay(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		psched_time_t sched = q->now;
		ktime_t expires;

		/* Same silence computation as cbq_ovl_classic. */
		delay += cl->offtime;
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		cl->undertime = q->now + delay;

		if (delay > 0) {
			sched += delay + cl->penalty;
			cl->penalized = sched;
			cl->cpriority = TC_CBQ_MAXPRIO;
			q->pmask |= (1<<TC_CBQ_MAXPRIO);

			/* Re-arm the timer only if our expiry is earlier
			 * than the one it is already programmed for. */
			expires = ktime_set(0, 0);
			expires = ktime_add_ns(expires, PSCHED_US2NS(sched));
			if (hrtimer_try_to_cancel(&q->delay_timer) &&
			    ktime_to_ns(ktime_sub(q->delay_timer.expires,
						  expires)) > 0)
				q->delay_timer.expires = expires;
			hrtimer_restart(&q->delay_timer);
			cl->delayed = 1;
			cl->xstats.overactions++;
			return;
		}
		delay = 1;
	}
	/* Fall back to watchdog-driven retry, as in the classic action. */
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;
}
553
554/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */
555
556static void cbq_ovl_lowprio(struct cbq_class *cl)
557{
558 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
559
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700560 cl->penalized = q->now + cl->penalty;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561
562 if (cl->cpriority != cl->priority2) {
563 cl->cpriority = cl->priority2;
564 q->pmask |= (1<<cl->cpriority);
565 cl->xstats.overactions++;
566 }
567 cbq_ovl_classic(cl);
568}
569
570/* TC_CBQ_OVL_DROP: penalize class by dropping */
571
572static void cbq_ovl_drop(struct cbq_class *cl)
573{
574 if (cl->q->ops->drop)
575 if (cl->q->ops->drop(cl->q))
576 cl->qdisc->q.qlen--;
577 cl->xstats.overactions++;
578 cbq_ovl_classic(cl);
579}
580
/* Scan the active ring of band @prio and reactivate (at normal
 * priority) every class whose penalty has expired at @now.  Returns
 * the interval until the earliest still-penalized class expires, or
 * 0 when none remain in this band.
 */
static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			/* Penalty over: unlink from this band and re-insert
			 * at the class's normal priority. */
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				/* Tail removed: repair, or kill the band. */
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;	/* track earliest expiry */
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}
615
/* hrtimer callback: release classes whose OVL_DELAY/OVL_LOWPRIO
 * penalty has expired, re-arm the timer if any remain penalized,
 * then unthrottle and kick the qdisc so transmission resumes.
 */
static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			/* Band still holds penalized classes: remember it
			 * and track the soonest remaining expiry. */
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		time = ktime_set(0, 0);
		time = ktime_add_ns(time, PSCHED_US2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
	}

	sch->flags &= ~TCQ_F_THROTTLED;
	__netif_schedule(sch);
	return HRTIMER_NORESTART;
}
656
Patrick McHardyc3bc7cf2007-07-15 00:03:05 -0700657#ifdef CONFIG_NET_CLS_ACT
/* Called by a child qdisc (via its __parent back-pointer) when its
 * enqueue failed: try to reclassify the skb into an ancestor's
 * best-effort default class and enqueue it there instead.  Returns 0
 * when the packet was re-enqueued or accounted as dropped by the new
 * class, -1 when no alternative class exists.
 */
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
	int len = skb->len;
	struct Qdisc *sch = child->__parent;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = q->rx_class;

	q->rx_class = NULL;

	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {

		cbq_mark_toplevel(q, cl);

		q->rx_class = cl;
		cl->q->__parent = sch;

		if (cl->q->enqueue(skb, cl->q) == 0) {
			sch->q.qlen++;
			sch->bstats.packets++;
			sch->bstats.bytes+=len;
			if (!cl->next_alive)
				cbq_activate_class(cl);
			return 0;
		}
		sch->qstats.drops++;
		return 0;
	}

	sch->qstats.drops++;
	return -1;
}
689#endif
690
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +0900691/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692 It is mission critical procedure.
693
694 We "regenerate" toplevel cutoff, if transmitting class
695 has backlog and it is not regulated. It is not part of
696 original CBQ description, but looks more reasonable.
697 Probably, it is wrong. This question needs further investigation.
698*/
699
/* "Regenerate" the top-level cutoff: when the transmitting class @cl
 * still has backlog, lower the cutoff to the level of the first
 * unregulated class (undertime == PSCHED_PASTPERFECT) on the chain
 * starting at @borrowed.
 */
static __inline__ void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed=borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}
721
/* Charge the just-completed transmission (q->tx_class, q->tx_len) to
 * the class and all its link-sharing ancestors: update each one's
 * scaled avgidle EWMA and, when over/at limit, compute the undertime
 * at which it may send again.  Finally refresh the toplevel cutoff.
 */
static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;

	q->tx_class = NULL;

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		   (now - last) is total time between packet right edges.
		   (last_pktlen/rate) is "virtual" busy time, so that

			 idle = (now - last) - last_pktlen/rate
		 */

		idle = q->now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			/* Huge gap: treat the class as having been fully idle. */
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

		/* true_avgidle := (1-W)*true_avgidle + W*idle,
		   where W=2^{-ewma_log}. But cl->avgidle is scaled:
		   cl->avgidle == true_avgidle/W,
		   hence:
		 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate expected time, when this class
			   will be allowed to send.
			   It will occur, when:
			   (1-W)*true_avgidle + W*delay = 0, i.e.
			   idle = (1/W - 1)*(-true_avgidle)
			   or
			   idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			   That is not all.
			   To maintain the rate allocated to the class,
			   we add to undertime virtual clock,
			   necessary to complete transmitted packet.
			   (len/phys_bandwidth has been already passed
			   to the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = q->now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		cl->last = q->now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
804
/* Decide whether @cl may transmit now.  Returns @cl itself when it is
 * underlimit (or is the root), an ancestor it may borrow bandwidth
 * from, or NULL when the whole borrow chain is overlimit (after
 * invoking the class's overlimit action) or blocked by the toplevel
 * cutoff.
 */
static __inline__ struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* It is very suspicious place. Now overlimit
		   action is generated for not bounded classes
		   only if link is completely congested.
		   Though it is in agree with ancestor-only paradigm,
		   it looks very stupid. Particularly,
		   it means that this chunk of code will either
		   never be called or result in strong amplification
		   of burstiness. Dangerous, silly, and, however,
		   no another solution exists.
		 */
		if ((cl = cl->borrow) == NULL) {
			this_cl->qstats.overlimits++;
			this_cl->overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}
842
/* One weighted-round-robin pass over the active ring of band @prio:
 * pick the next class that is under limit (or may borrow) and still
 * has deficit, dequeue one packet from it, and record it as the
 * transmitting class.  Empty or penalized classes are unlinked from
 * the ring on the way.  Returns a packet or NULL when the whole band
 * yields nothing.
 */
static __inline__ struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				   this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			   It could occur even if cl->q->q.qlen != 0
			   f.e. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= skb->len;
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += skb->len;
				cl->xstats.borrows += skb->len;
#endif
			}
			q->tx_len = skb->len;

			if (cl->deficit <= 0) {
				/* Round over for this class: make it the
				 * tail and top up the successor's deficit. */
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				   Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				/* Still backlogged (just penalized): re-insert
				 * into its current effective band. */
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}
945
946static __inline__ struct sk_buff *
947cbq_dequeue_1(struct Qdisc *sch)
948{
949 struct cbq_sched_data *q = qdisc_priv(sch);
950 struct sk_buff *skb;
951 unsigned activemask;
952
953 activemask = q->activemask&0xFF;
954 while (activemask) {
955 int prio = ffz(~activemask);
956 activemask &= ~(1<<prio);
957 skb = cbq_dequeue_prio(sch, prio);
958 if (skb)
959 return skb;
960 }
961 return NULL;
962}
963
/* Qdisc dequeue entry point: advance the integrated clock, charge the
 * previous transmission to its class (cbq_update), then run the WRR
 * scheduler.  When everything is overlimit, reset the toplevel cutoff
 * once and retry; if backlog remains undeliverable, arm the watchdog.
 */
static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;
	psched_tdiff_t incr;

	now = psched_get_time();
	incr = now - q->now_rt;

	if (q->tx_class) {
		psched_tdiff_t incr2;
		/* Time integrator. We calculate EOS time
		   by adding expected packet transmission time.
		   If real time is greater, we warp artificial clock,
		   so that:

		   cbq_time = max(real_time, work);
		 */
		incr2 = L2T(&q->link, q->tx_len);
		q->now += incr2;
		cbq_update(q);
		if ((incr -= incr2) < 0)
			incr = 0;
	}
	q->now += incr;
	q->now_rt = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		/* All the classes are overlimit.

		   It is possible, if:

		   1. Scheduler is empty.
		   2. Toplevel cutoff inhibited borrowing.
		   3. Root class is overlimit.

		   Reset 2d and 3d conditions and retry.

		   Note, that NS and cbq-2.0 are buggy, peeking
		   an arbitrary class is appropriate for ancestor-only
		   sharing, but not for toplevel algorithm.

		   Our version is better, but slower, because it requires
		   two passes, but it is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	   Sigh... start watchdog timer in the last case. */

	if (sch->q.qlen) {
		sch->qstats.overlimits++;
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}
1040
/* CBQ class maintenance routines */
1042
/* Recompute ->level for 'this' and every ancestor up to the root.
 * A class's level is one more than the maximum level of its children;
 * a childless class ends up at level 1. */
static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		/* children form a circular list linked via ->sibling */
		if ((cl = this->children) != NULL) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level+1;
	} while ((this = this->tparent) != NULL);
}
1061
/* Recompute the WRR quantum of every class in priority band 'prio' so
 * the quanta stay proportional to class weights, then clamp obviously
 * bad results to a default derived from the device MTU. */
static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	struct hlist_node *n;
	unsigned int h;

	/* No weight registered in this band: nothing to normalize. */
	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffer of
			   arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			/* Repair non-positive or absurdly large quanta
			   (note: checked for every class, not only band
			   'prio'). */
			if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
				printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}
1087
/* Rebuild the split node's per-priority default-class table after 'cl'
 * changed its defmap.  First drop stale entries that still point at cl,
 * then for every unset priority pick the attached class with the lowest
 * level that claims it. */
static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned h;
	int i;

	if (split == NULL)
		return;

	/* Drop entries naming cl for priorities it no longer maps. */
	for (i=0; i<=TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap&(1<<i)))
			split->defaults[i] = NULL;
	}

	for (i=0; i<=TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		/* Scan all classes bound to this split; prefer the one
		   with the smallest level claiming priority i. */
		for (h = 0; h < q->clhash.hashsize; h++) {
			struct hlist_node *n;
			struct cbq_class *c;

			hlist_for_each_entry(c, n, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap&(1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}
1124
/* Change which split node 'cl' is attached to and/or which priorities
 * map to it.  splitid == 0 means "keep the current split".  The split
 * must be an ancestor of cl; otherwise the request is silently ignored. */
static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		if ((split = cl->split) == NULL)
			return;
		splitid = split->common.classid;
	}

	/* Resolve splitid among cl's ancestors when it is not already
	   the current split node. */
	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		/* Moving to a new split: detach from the old one with an
		   empty defmap first, then attach with the new map. */
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def&mask;
	} else
		cl->defmap = (cl->defmap&~mask)|(def&mask);

	cbq_sync_defmap(cl);
}
1154
/* Remove 'this' from the class hash and splice it out of its parent's
 * circular sibling ring.  A root-level class must be self-linked. */
static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		/* Walk the circular sibling ring to unlink 'this'. */
		clp=&this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		/* Fix the parent's children pointer if it referenced us;
		   a self-linked sibling means we were the only child. */
		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		BUG_TRAP(this->sibling == this);
	}
}
1182
/* Insert 'this' into the class hash and into its parent's circular
 * sibling ring (a class with no parent stays self-linked). */
static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		/* Splice into the existing circular sibling ring. */
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}
1201
/* Drop one packet from the least important active class that is able to
 * drop.  Returns the dropped packet's length, or 0 if nothing dropped. */
static unsigned int cbq_drop(struct Qdisc* sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl, *cl_head;
	int prio;
	unsigned int len;

	/* Scan bands from numerically highest prio (least important). */
	for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
		if ((cl_head = q->active[prio]) == NULL)
			continue;

		cl = cl_head;
		do {
			if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
				sch->q.qlen--;
				/* The drop emptied the class: take it off
				   the active ring. */
				if (!cl->q->q.qlen)
					cbq_deactivate_class(cl);
				return len;
			}
		} while ((cl = cl->next_alive) != cl_head);
	}
	return 0;
}
1225
/* Reset the qdisc: cancel timers, clear the active rings and the
 * transmit bookkeeping, reset every class's child qdisc and per-class
 * scheduling state back to its configured defaults. */
static void
cbq_reset(struct Qdisc* sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	struct hlist_node *n;
	int prio;
	unsigned h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();
	q->now_rt = q->now;

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}
1261
1262
1263static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
1264{
1265 if (lss->change&TCF_CBQ_LSS_FLAGS) {
1266 cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
1267 cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
1268 }
1269 if (lss->change&TCF_CBQ_LSS_EWMA)
1270 cl->ewma_log = lss->ewma_log;
1271 if (lss->change&TCF_CBQ_LSS_AVPKT)
1272 cl->avpkt = lss->avpkt;
1273 if (lss->change&TCF_CBQ_LSS_MINIDLE)
1274 cl->minidle = -(long)lss->minidle;
1275 if (lss->change&TCF_CBQ_LSS_MAXIDLE) {
1276 cl->maxidle = lss->maxidle;
1277 cl->avgidle = lss->maxidle;
1278 }
1279 if (lss->change&TCF_CBQ_LSS_OFFTIME)
1280 cl->offtime = lss->offtime;
1281 return 0;
1282}
1283
1284static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
1285{
1286 q->nclasses[cl->priority]--;
1287 q->quanta[cl->priority] -= cl->weight;
1288 cbq_normalize_quanta(q, cl->priority);
1289}
1290
1291static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
1292{
1293 q->nclasses[cl->priority]++;
1294 q->quanta[cl->priority] += cl->weight;
1295 cbq_normalize_quanta(q, cl->priority);
1296}
1297
/* Apply weighted-round-robin parameters; zero-valued fields in 'wrr'
 * mean "leave unchanged".  Callers reconfiguring an existing class run
 * cbq_rmprio() first (see cbq_change_class), since this re-registers
 * the class via cbq_addprio().  Always returns 0. */
static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		/* userspace priorities are 1-based, internal are 0-based */
		cl->priority = wrr->priority-1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO-1;
	}

	cbq_addprio(q, cl);
	return 0;
}
1316
/* Select the overlimit handling strategy for 'cl' from userspace
 * configuration.  Returns -EINVAL for an unknown strategy or an invalid
 * lowprio priority2. */
static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
{
	switch (ovl->strategy) {
	case TC_CBQ_OVL_CLASSIC:
		cl->overlimit = cbq_ovl_classic;
		break;
	case TC_CBQ_OVL_DELAY:
		cl->overlimit = cbq_ovl_delay;
		break;
	case TC_CBQ_OVL_LOWPRIO:
		/* priority2 is 1-based and must denote a lower priority
		   (numerically greater) than the class's own. */
		if (ovl->priority2-1 >= TC_CBQ_MAXPRIO ||
		    ovl->priority2-1 <= cl->priority)
			return -EINVAL;
		cl->priority2 = ovl->priority2-1;
		cl->overlimit = cbq_ovl_lowprio;
		break;
	case TC_CBQ_OVL_DROP:
		cl->overlimit = cbq_ovl_drop;
		break;
	case TC_CBQ_OVL_RCLASSIC:
		cl->overlimit = cbq_ovl_rclassic;
		break;
	default:
		return -EINVAL;
	}
	cl->penalty = ovl->penalty;
	return 0;
}
1345
#ifdef CONFIG_NET_CLS_ACT
/* Set the policing mode.  For TC_POLICE_RECLASSIFY, install the reshape
 * callback on the class's child qdisc so overlimit packets can be
 * reclassified; otherwise clear it.  Always returns 0. */
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
	cl->police = p->police;

	if (cl->q->handle) {
		if (p->police == TC_POLICE_RECLASSIFY)
			cl->q->reshape_fail = cbq_reshape_fail;
		else
			cl->q->reshape_fail = NULL;
	}
	return 0;
}
#endif
1360
/* Apply filter options: move the class to the requested split node and
 * update its priority default map.  Always returns 0. */
static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}
1366
/* Netlink attribute validation policy: minimum payload sizes for each
 * CBQ option attribute. */
static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};
1376
/* Qdisc init: parse and validate netlink options, set up the class hash
 * and the built-in root class (q->link), and initialize the timers.
 * TCA_CBQ_RATE and TCA_CBQ_RTAB are mandatory. */
static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
		return -EINVAL;

	r = nla_data(tb[TCA_CBQ_RATE]);

	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_rtab;

	/* The embedded root ("link") class models the physical link. */
	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	/* Fall back to noop_qdisc if the default pfifo cannot be made. */
	if (!(q->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					    &pfifo_qdisc_ops,
					    sch->handle)))
		q->link.q = &noop_qdisc;

	q->link.priority = TC_CBQ_MAXPRIO-1;
	q->link.priority2 = TC_CBQ_MAXPRIO-1;
	q->link.cpriority = TC_CBQ_MAXPRIO-1;
	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
	q->link.overlimit = cbq_ovl_classic;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	q->delay_timer.function = cbq_undelay;
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();
	q->now_rt = q->now;

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}
1441
/* Emit the TCA_CBQ_RATE attribute for 'cl'.  Returns skb->len on
 * success, -1 when the skb runs out of room (message trimmed back). */
static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	NLA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
1453
/* Emit the TCA_CBQ_LSSOPT (link-sharing) attribute for 'cl'.
 * Returns skb->len on success, -1 on overrun (skb trimmed back). */
static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	/* NULL borrow/share pointers encode bounded/isolated (the inverse
	   of how cbq_set_lss stores them). */
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	NLA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
1478
1479static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
1480{
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001481 unsigned char *b = skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 struct tc_cbq_wrropt opt;
1483
1484 opt.flags = 0;
1485 opt.allot = cl->allot;
1486 opt.priority = cl->priority+1;
1487 opt.cpriority = cl->cpriority+1;
1488 opt.weight = cl->weight;
Patrick McHardy1e904742008-01-22 22:11:17 -08001489 NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 return skb->len;
1491
Patrick McHardy1e904742008-01-22 22:11:17 -08001492nla_put_failure:
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -07001493 nlmsg_trim(skb, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 return -1;
1495}
1496
/* Emit the TCA_CBQ_OVL_STRATEGY attribute for 'cl'.  Returns skb->len
 * on success, -1 on overrun (skb trimmed back). */
static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_ovl opt;

	opt.strategy = cl->ovl_strategy;
	opt.priority2 = cl->priority2+1;	/* back to 1-based */
	opt.pad = 0;
	opt.penalty = cl->penalty;
	NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
1513
/* Emit the TCA_CBQ_FOPT attribute, but only when the class actually has
 * a split node or a default map configured.  Returns skb->len on
 * success, -1 on overrun (skb trimmed back). */
static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt);
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
1531
#ifdef CONFIG_NET_CLS_ACT
/* Emit the TCA_CBQ_POLICE attribute when policing is configured.
 * Returns skb->len on success, -1 on overrun (skb trimmed back). */
static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_police opt;

	if (cl->police) {
		opt.police = cl->police;
		opt.__res1 = 0;		/* clear reserved fields */
		opt.__res2 = 0;
		NLA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt);
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
#endif
1551
/* Dump every per-class attribute in sequence; returns 0 on success,
 * -1 as soon as any sub-dump overruns the skb. */
static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_ACT
	    cbq_dump_police(skb, cl) < 0 ||
#endif
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}
1565
/* Dump qdisc-level options (the root/link class) inside a nested
 * TCA_OPTIONS attribute.  Returns skb->len, or -1 on overrun. */
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
1583
/* Copy the root class's extended stats (with up-to-date avgidle) into
 * the stats dump. */
static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}
1592
/* Dump one class: fill the tcmsg header (parent, handle, child qdisc
 * handle) and nest the class attributes under TCA_OPTIONS. */
static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class*)arg;
	struct nlattr *nest;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->common.classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
1619
/* Copy basic, rate-estimator, queue and CBQ extended stats for one
 * class.  undertime is reported relative to the qdisc's virtual clock
 * q->now; PSCHED_PASTPERFECT maps to 0. */
static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class*)arg;

	cl->qstats.qlen = cl->q->q.qlen;
	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
1641
/* Graft a new child qdisc onto class 'arg', returning the old one via
 * *old.  A NULL 'new' means "replace with a fresh default pfifo".
 * Returns 0, -ENOBUFS if the default qdisc can't be created, or
 * -ENOENT for an unknown class. */
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class*)arg;

	if (cl) {
		if (new == NULL) {
			new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
						&pfifo_qdisc_ops,
						cl->common.classid);
			if (new == NULL)
				return -ENOBUFS;
		} else {
#ifdef CONFIG_NET_CLS_ACT
			/* Keep the reclassify hook consistent with the
			   class's policing mode. */
			if (cl->police == TC_POLICE_RECLASSIFY)
				new->reshape_fail = cbq_reshape_fail;
#endif
		}
		sch_tree_lock(sch);
		*old = xchg(&cl->q, new);
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
		sch_tree_unlock(sch);

		return 0;
	}
	return -ENOENT;
}
1670
1671static struct Qdisc *
1672cbq_leaf(struct Qdisc *sch, unsigned long arg)
1673{
1674 struct cbq_class *cl = (struct cbq_class*)arg;
1675
1676 return cl ? cl->q : NULL;
1677}
1678
Jarek Poplawskia37ef2e2006-12-08 00:25:55 -08001679static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
1680{
1681 struct cbq_class *cl = (struct cbq_class *)arg;
1682
1683 if (cl->q->q.qlen == 0)
1684 cbq_deactivate_class(cl);
1685}
1686
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
1688{
1689 struct cbq_sched_data *q = qdisc_priv(sch);
1690 struct cbq_class *cl = cbq_class_lookup(q, classid);
1691
1692 if (cl) {
1693 cl->refcnt++;
1694 return (unsigned long)cl;
1695 }
1696 return 0;
1697}
1698
/* Free one class and everything it owns (filter chain, child qdisc,
 * rate table, estimator).  The embedded root class (q->link) is part
 * of the qdisc's private data and is not kfree'd. */
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	BUG_TRAP(!cl->filters);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}
1712
/* Tear down the whole qdisc: destroy all filter chains in a first pass,
 * then destroy the classes themselves, then the class hash. */
static void
cbq_destroy(struct Qdisc* sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct cbq_class *cl;
	unsigned h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leafs which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	/* Safe iteration: cbq_destroy_class frees the entry we stand on. */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
1740
/* Drop a reference on class 'arg'; destroy it when the count reaches 0.
 * rx_class is cleared under the root lock so a concurrent enqueue path
 * cannot observe a stale pointer to the dying class. */
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class*)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}
1759
/*
 * cbq_change_class - create a new CBQ class or modify an existing one
 * @sch:      the CBQ qdisc
 * @classid:  requested class handle; 0 means auto-allocate a minor id
 * @parentid: handle of the intended parent class, or TC_H_ROOT
 * @tca:      netlink attributes; tca[TCA_OPTIONS] carries the nested
 *            CBQ-specific options, tca[TCA_RATE] an optional estimator
 * @arg:      in: cookie of an existing class, or 0 to create one;
 *            out: cookie of the affected class on success
 *
 * Returns 0 on success or a negative errno.
 */
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class*)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (cl) {
		/* An existing class was passed in: change it in place. */

		/* Check parent */
		if (parentid) {
			/* Re-parenting is not supported: the given parent
			 * must match the class's current parent (or be
			 * TC_H_ROOT for a top-level class). */
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
			if (rtab == NULL)
				return -EINVAL;
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		/* Take the class off the active list while its scheduling
		 * parameters are being rewritten. */
		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			/* Swap in the new rate table and drop the old one. */
			rtab = xchg(&cl->R_tab, rtab);
			qdisc_put_rtab(rtab);
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			/* New WRR parameters: remove from the old priority
			 * band before applying them. */
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_OVL_STRATEGY])
			cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));

#ifdef CONFIG_NET_CLS_ACT
		if (tb[TCA_CBQ_POLICE])
			cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		/* Re-activate if the class still has queued packets. */
		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		if (tca[TCA_RATE])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      qdisc_root_lock(sch),
					      tca[TCA_RATE]);
		return 0;
	}

	/* No existing class: create a new one. */

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	/* WRR, rate and LSS options are mandatory for a new class. */
	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		/* Explicit handle: must belong to this qdisc's major and
		 * must not already exist. */
		err = -EINVAL;
		if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid))
			goto failure;
	} else {
		/* Auto-allocate: scan the 0x8000-based minor space for an
		 * unused id, starting from the last generator position. */
		int i;
		classid = TC_H_MAKE(sch->handle,0x8000);

		for (i=0; i<0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	/* Default parent is the link (root) class. */
	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;
	cl->R_tab = rtab;
	rtab = NULL;	/* ownership transferred to cl; don't free on failure */
	cl->refcnt = 1;
	/* Attach a default pfifo child qdisc; fall back to noop on failure. */
	if (!(cl->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					&pfifo_qdisc_ops, classid)))
		cl->q = &noop_qdisc;
	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	/* Direct children of the link class do not share bandwidth. */
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	/* Inherit unspecified parameters from the link class. */
	if (cl->ewma_log==0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle==0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt==0)
		cl->avpkt = q->link.avpkt;
	cl->overlimit = cbq_ovl_classic;
	if (tb[TCA_CBQ_OVL_STRATEGY])
		cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
#ifdef CONFIG_NET_CLS_ACT
	if (tb[TCA_CBQ_POLICE])
		cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	if (tca[TCA_RATE])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  qdisc_root_lock(sch), tca[TCA_RATE]);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}
1932
/*
 * cbq_delete - remove a class from the CBQ hierarchy
 * @sch: the CBQ qdisc
 * @arg: cookie of the class to delete
 *
 * Fails with -EBUSY while filters or child classes still reference the
 * class, or when it is the built-in link (root) class. The class itself
 * is only destroyed once its refcount drops to zero.
 */
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class*)arg;
	unsigned int qlen;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	/* Flush the child qdisc and propagate the qlen drop upwards. */
	qlen = cl->q->q.qlen;
	qdisc_reset(cl->q);
	qdisc_tree_decrease_qlen(cl->q, qlen);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	/* Clear any transmit-state references to the dying class. */
	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	if (--cl->refcnt == 0)
		cbq_destroy_class(sch, cl);

	return 0;
}
1975
1976static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
1977{
1978 struct cbq_sched_data *q = qdisc_priv(sch);
1979 struct cbq_class *cl = (struct cbq_class *)arg;
1980
1981 if (cl == NULL)
1982 cl = &q->link;
1983
1984 return &cl->filter_list;
1985}
1986
1987static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
1988 u32 classid)
1989{
1990 struct cbq_sched_data *q = qdisc_priv(sch);
1991 struct cbq_class *p = (struct cbq_class*)parent;
1992 struct cbq_class *cl = cbq_class_lookup(q, classid);
1993
1994 if (cl) {
1995 if (p && p->level <= cl->level)
1996 return 0;
1997 cl->filters++;
1998 return (unsigned long)cl;
1999 }
2000 return 0;
2001}
2002
2003static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
2004{
2005 struct cbq_class *cl = (struct cbq_class*)arg;
2006
2007 cl->filters--;
2008}
2009
2010static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2011{
2012 struct cbq_sched_data *q = qdisc_priv(sch);
Patrick McHardyd77fea22008-07-05 23:22:05 -07002013 struct cbq_class *cl;
2014 struct hlist_node *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 unsigned h;
2016
2017 if (arg->stop)
2018 return;
2019
Patrick McHardyd77fea22008-07-05 23:22:05 -07002020 for (h = 0; h < q->clhash.hashsize; h++) {
2021 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 if (arg->count < arg->skip) {
2023 arg->count++;
2024 continue;
2025 }
2026 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
2027 arg->stop = 1;
2028 return;
2029 }
2030 arg->count++;
2031 }
2032 }
2033}
2034
/* Per-class operations exposed to the qdisc core / tc interface. */
static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_chain	=	cbq_find_tcf,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};
2050
/* Qdisc registration record for "cbq" (see register_qdisc() below). */
static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.requeue	=	cbq_requeue,
	.drop		=	cbq_drop,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};
2068
2069static int __init cbq_module_init(void)
2070{
2071 return register_qdisc(&cbq_qdisc_ops);
2072}
/* Module exit point: unregister the "cbq" qdisc. */
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");