/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------

	The algorithm skeleton was taken from the NS simulator cbq.cc.
	Anyone checking this code against the LBL version should take into
	account that ONLY the skeleton was borrowed; the implementation is
	different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than MTU, which is always the case when real-time classes
	have small rates. Note that the statement of [3] is
	incomplete: delay may actually be estimated even if the class
	per-round allotment is less than MTU. Namely, if the per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	In the worst case we have an IntServ estimate with D = W*r+k*MTU
	and C = MTU*r. The proof (if correct at all) is trivial.


	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places, which look like wrong translations
	from NS. Anyone is advised to find these differences
	and explain to me why I am wrong 8).

	--- Linux has no EOI event, so we cannot estimate true class
	idle time. The workaround is to consider the next dequeue event
	as a sign that the previous packet is finished. This is wrong
	because of internal device queueing, but on a permanently loaded
	link it is true. Moreover, combined with the clock integrator,
	this scheme looks very close to an ideal solution.  */
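
/*
 * Illustrative numbers for the delay bound above (not taken from the
 * sources; a hypothetical configuration chosen only to make the formula
 * concrete): MTU = 1500 B, link rate B = 10 Mbit/s, k = 2 classes with
 * r = r_1 + r_2 = 0.5, and a weight W such that W*r_1 = 500 B, i.e. a
 * per-round allotment below the MTU. Then:
 *
 *	[MTU/(W*r_1)] = ceil(1500/500) = 3 rounds per packet,
 *	W*r = 2500 B, so
 *	delay_1 <= (3*2500 + 2500 + 2*1500) B / 10 Mbit/s ~= 10.4 ms.
 *
 * The point is that the bound stays finite even though the per-round
 * allotment (500 B) is smaller than the MTU.
 */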

struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */
	unsigned char		ovl_strategy;
#ifdef CONFIG_NET_CLS_ACT
	unsigned char		police;
#endif

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* Overlimit strategy parameters */
	void			(*overlimit)(struct cbq_class *cl);
	psched_tdiff_t		penalty;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est64 rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto	*filter_list;

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};

struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;		/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;		/* Cached timestamp */
	psched_time_t		now_rt;		/* Cached real time */
	unsigned int		pmask;

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit just now */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};


#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)
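
/*
 * L2T ("length to time") converts a packet length into transmission time
 * at the class's configured rate, via the precomputed rate table. A rough
 * sketch of the idea with hypothetical numbers (the real qdisc_l2t() is a
 * table lookup, not a division): at rate.rate = 125000 B/s (1 Mbit/s),
 * L2T(cl, 1500) corresponds to 1500/125000 s = 12 ms, expressed in
 * psched ticks.
 */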

static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif

/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link sharing rules (f.e. route based) at the root
 * of CBQ, so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */

static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_result res;

	/*
	 * Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		/*
		 * Step 2+n. Apply classifier.
		 */
		if (!head->filter_list ||
		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If the classifier selected a link sharing class,
		 * apply the agency-specific classifier.
		 * Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}
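
/*
 * A hypothetical walk through cbq_classify(), to make the steps concrete
 * (class ids invented for illustration): with sch->handle = 1:0, a packet
 * carrying skb->priority = 1:30 is matched directly in step 1. A packet
 * with no such priority falls through to the filters on the root; if a
 * route-based filter returns split class 1:1 (a non-leaf), the loop
 * restarts with head = 1:1 and applies that node's own filters or its
 * defaults[] map, descending until a leaf (level 0) class is returned.
 */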

/*
 * A packet has just been enqueued on the empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}

/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during the round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}

static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
		psched_time_t now;
		psched_tdiff_t incr;

		now = psched_get_time();
		incr = now - q->now_rt;
		now = q->now + incr;

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}

static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}

#ifdef CONFIG_NET_CLS_ACT
	cl->q->__parent = sch;
#endif
	ret = qdisc_enqueue(skb, cl->q);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}

/* Overlimit actions */

/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */

static void cbq_ovl_classic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * Class goes to sleep, so that it will have no
		 * chance to work avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug in this place; apparently
		 * they forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * the real available rate, rather than the leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}
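
/*
 * The penalty arithmetic above, with invented numbers: suppose q->now has
 * just reached cl->undertime, offtime = 1000 ticks, avgidle = -800 and
 * ewma_log = 3 (W = 1/8). The subtracted term is
 * (-avgidle) - ((-avgidle) >> ewma_log) = 800 - 100 = 700, so only 300
 * ticks of offtime remain: the deeper avgidle is in the red, the larger
 * the part of the penalty that is waived, matching the "let's forgive
 * it" comment in cbq_ovl_classic().
 */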

/* TC_CBQ_OVL_RCLASSIC: penalize, by offtime, classes in the hierarchy
 * when they go overlimit
 */

static void cbq_ovl_rclassic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this = cl;

	do {
		if (cl->level > q->toplevel) {
			cl = NULL;
			break;
		}
	} while ((cl = cl->borrow) != NULL);

	if (cl == NULL)
		cl = this;
	cbq_ovl_classic(cl);
}

/* TC_CBQ_OVL_DELAY: delay until the class goes underlimit */

static void cbq_ovl_delay(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(cl->qdisc)->state))
		return;

	if (!cl->delayed) {
		psched_time_t sched = q->now;
		ktime_t expires;

		delay += cl->offtime;
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		cl->undertime = q->now + delay;

		if (delay > 0) {
			sched += delay + cl->penalty;
			cl->penalized = sched;
			cl->cpriority = TC_CBQ_MAXPRIO;
			q->pmask |= (1<<TC_CBQ_MAXPRIO);

			expires = ns_to_ktime(PSCHED_TICKS2NS(sched));
			if (hrtimer_try_to_cancel(&q->delay_timer) &&
			    ktime_to_ns(ktime_sub(
					hrtimer_get_expires(&q->delay_timer),
					expires)) > 0)
				hrtimer_set_expires(&q->delay_timer, expires);
			hrtimer_restart(&q->delay_timer);
			cl->delayed = 1;
			cl->xstats.overactions++;
			return;
		}
		delay = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;
}

/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */

static void cbq_ovl_lowprio(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	cl->penalized = q->now + cl->penalty;

	if (cl->cpriority != cl->priority2) {
		cl->cpriority = cl->priority2;
		q->pmask |= (1<<cl->cpriority);
		cl->xstats.overactions++;
	}
	cbq_ovl_classic(cl);
}

/* TC_CBQ_OVL_DROP: penalize class by dropping */

static void cbq_ovl_drop(struct cbq_class *cl)
{
	if (cl->q->ops->drop)
		if (cl->q->ops->drop(cl->q))
			cl->qdisc->q.qlen--;
	cl->xstats.overactions++;
	cbq_ovl_classic(cl);
}

static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}

static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		time = ktime_set(0, 0);
		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
	}

	qdisc_unthrottled(sch);
	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
	struct Qdisc *sch = child->__parent;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = q->rx_class;

	q->rx_class = NULL;

	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
		int ret;

		cbq_mark_toplevel(q, cl);

		q->rx_class = cl;
		cl->q->__parent = sch;

		ret = qdisc_enqueue(skb, cl->q);
		if (ret == NET_XMIT_SUCCESS) {
			sch->q.qlen++;
			if (!cl->next_alive)
				cbq_activate_class(cl);
			return 0;
		}
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return 0;
	}

	sch->qstats.drops++;
	return -1;
}
#endif

/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" the toplevel cutoff if the transmitting class
 * has backlog and is not regulated. This is not part of the
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}

static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;

	q->tx_class = NULL;

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is the total time between packet right edges.
		 * (last_pktlen/rate) is the "virtual" busy time, so that
		 *
		 *	idle = (now - last) - last_pktlen/rate
		 */

		idle = q->now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

			/* true_avgidle := (1-W)*true_avgidle + W*idle,
			 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
			 * cl->avgidle == true_avgidle/W,
			 * hence:
			 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate the expected time when this class
			 * will be allowed to send.
			 * It will occur when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime the virtual clock time
			 * necessary to complete the transmitted packet.
			 * (len/phys_bandwidth has already passed
			 * by the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = q->now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		cl->last = q->now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
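
/*
 * A numeric sketch of the scaled EWMA above (hypothetical values):
 * with ewma_log = 3 (W = 1/8), the unscaled update
 *	true_avgidle := (1-W)*true_avgidle + W*idle
 * becomes, in the scaled variable avgidle = true_avgidle/W,
 *	avgidle += idle - (avgidle >> 3).
 * E.g. avgidle = 1600 and idle = -400 (the class sent faster than its
 * rate): avgidle becomes 1600 + (-400) - 200 = 1000, i.e. the idle
 * credit decays toward the overlimit region without any division.
 */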

static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Now the overlimit
		 * action is generated for non-bounded classes
		 * only if the link is completely congested.
		 * Though it agrees with the ancestor-only paradigm,
		 * it looks very stupid. Particularly,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and, however,
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			this_cl->overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}

static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0,
			 * f.e. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from the active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}
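
/*
 * The WRR core above behaves like a deficit-style round robin; a sketch
 * with invented numbers: say a class has quantum = 1500 and deficit =
 * 100. It may still send one packet (deficit > 0); a 1400-byte skb
 * drives deficit to -1300, so on its next visit the class is skipped
 * once while deficit += quantum brings it to 200. Over many rounds each
 * backlogged class therefore averages about quantum bytes per round,
 * even when single packets exceed the quantum.
 */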

static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);
		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}

static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;
	psched_tdiff_t incr;

	now = psched_get_time();
	incr = now - q->now_rt;

	if (q->tx_class) {
		psched_tdiff_t incr2;
		/* Time integrator. We calculate EOS time
		 * by adding the expected packet transmission time.
		 * If real time is greater, we warp the artificial clock,
		 * so that:
		 *
		 *	cbq_time = max(real_time, work);
		 */
		incr2 = L2T(&q->link, q->tx_len);
		q->now += incr2;
		cbq_update(q);
		if ((incr -= incr2) < 0)
			incr = 0;
		q->now += incr;
	} else {
		if (now > q->now)
			q->now = now;
	}
	q->now_rt = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * It is possible if:
		 *
		 * 1. The scheduler is empty.
		 * 2. The toplevel cutoff inhibited borrowing.
		 * 3. The root class is overlimit.
		 *
		 * Reset the 2nd and 3rd conditions and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy; peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for the toplevel algorithm.
		 *
		 * Our version is better but slower: it requires two
		 * passes, which is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start the watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		sch->qstats.overlimits++;
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}
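
/*
 * A sketch of the time integrator with invented numbers: suppose the
 * last packet was 1000 B and L2T(&q->link, 1000) = 80 ticks, while only
 * 50 real ticks elapsed since the previous dequeue (incr = 50). The
 * virtual clock advances by incr2 = 80 and the remaining (incr - incr2)
 * is clamped to 0, so q->now runs 30 ticks ahead of real time: the
 * "work" clock. If instead 120 real ticks had elapsed, q->now would
 * advance by 80 + 40 = 120, i.e. cbq_time = max(real_time, work).
 */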

/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		cl = this->children;
		if (cl) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}

static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}
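
/*
 * Quantum normalization, with hypothetical numbers: two classes at the
 * same prio with weights 100 and 300, each with allot = 1500, give
 * q->quanta[prio] = 400 and q->nclasses[prio] = 2. The formula
 *	quantum = weight * allot * nclasses / quanta
 * yields 100*1500*2/400 = 750 and 300*1500*2/400 = 2250, so the quanta
 * average to the allot while staying proportional to the weights.
 */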

static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct cbq_class *c;

			hlist_for_each_entry(c, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}

static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}

static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}

static unsigned int cbq_drop(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl, *cl_head;
	int prio;
	unsigned int len;

	for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
		cl_head = q->active[prio];
		if (!cl_head)
			continue;

		cl = cl_head;
		do {
			if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
				sch->q.qlen--;
				if (!cl->q->q.qlen)
					cbq_deactivate_class(cl);
				return len;
			}
		} while ((cl = cl->next_alive) != cl_head);
	}
	return 0;
}

static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();
	q->now_rt = q->now;

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}


static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}

static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
{
	switch (ovl->strategy) {
	case TC_CBQ_OVL_CLASSIC:
		cl->overlimit = cbq_ovl_classic;
		break;
	case TC_CBQ_OVL_DELAY:
		cl->overlimit = cbq_ovl_delay;
		break;
	case TC_CBQ_OVL_LOWPRIO:
		if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
		    ovl->priority2 - 1 <= cl->priority)
			return -EINVAL;
		cl->priority2 = ovl->priority2 - 1;
		cl->overlimit = cbq_ovl_lowprio;
		break;
	case TC_CBQ_OVL_DROP:
		cl->overlimit = cbq_ovl_drop;
		break;
	case TC_CBQ_OVL_RCLASSIC:
		cl->overlimit = cbq_ovl_rclassic;
		break;
	default:
		return -EINVAL;
	}
	cl->penalty = ovl->penalty;
	return 0;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
	cl->police = p->police;

	if (cl->q->handle) {
		if (p->police == TC_POLICE_RECLASSIFY)
			cl->q->reshape_fail = cbq_reshape_fail;
		else
			cl->q->reshape_fail = NULL;
	}
	return 0;
}
#endif

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}

static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};

static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
		return -EINVAL;

	r = nla_data(tb[TCA_CBQ_RATE]);

	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_rtab;

	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle);
	if (!q->link.q)
		q->link.q = &noop_qdisc;

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
	q->link.overlimit = cbq_ovl_classic;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	q->delay_timer.function = cbq_undelay;
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();
	q->now_rt = q->now;

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}
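
/*
 * For orientation, a typical way this qdisc is configured from user
 * space (a hedged sketch: device, handles, and rates are placeholders,
 * and the exact tc(8) option set depends on the iproute2 version):
 *
 *	tc qdisc add dev eth0 root handle 1: cbq \
 *		bandwidth 100Mbit avpkt 1000 cell 8
 *	tc class add dev eth0 parent 1: classid 1:1 cbq \
 *		bandwidth 100Mbit rate 6Mbit weight 0.6Mbit prio 8 \
 *		allot 1514 cell 8 maxburst 20 avpkt 1000 bounded
 *
 * tc computes the rate table (TCA_CBQ_RTAB) and the LSS/WRR attributes
 * that cbq_init() and cbq_set_lss()/cbq_set_wrr() consume.
 */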
1424
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001425static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426{
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001427 unsigned char *b = skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428
David S. Miller1b34ec42012-03-29 05:11:39 -04001429 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
1430 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 return skb->len;
1432
Patrick McHardy1e904742008-01-22 22:11:17 -08001433nla_put_failure:
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -07001434 nlmsg_trim(skb, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 return -1;
1436}
1437
static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

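/*
 * Weighted round-robin parameters.  The struct is zeroed first so its
 * reserved/padding bytes never leak kernel stack to userspace, and the
 * 0-based internal priorities are exported 1-based.
 */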
static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_ovl opt;

	opt.strategy = cl->ovl_strategy;
	opt.priority2 = cl->priority2 + 1;
	opt.pad = 0;
	opt.penalty = cl->penalty;
	if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

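/*
 * Filter/defmap parameters: emitted only when the class actually has a
 * split node or a default map, with defchange set to all-ones so that
 * feeding the dump back through cbq_set_fopt() reapplies the full map.
 */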
static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_police opt;

	if (cl->police) {
		opt.police = cl->police;
		opt.__res1 = 0;
		opt.__res2 = 0;
		if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
#endif

static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_ACT
	    cbq_dump_police(skb, cl) < 0 ||
#endif
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

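/*
 * Dump the qdisc-level (root class) configuration.  All TCA_CBQ_*
 * attributes are nested inside TCA_OPTIONS; any failure cancels the
 * whole nest so userspace never sees a half-built message.
 */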
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}

static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class *)arg;
	struct nlattr *nest;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->common.classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

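/*
 * Per-class statistics.  qlen and avgidle are snapshotted into
 * cl->xstats at dump time; undertime is reported relative to q->now so
 * userspace sees a remaining interval rather than an absolute stamp.
 */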
static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->qstats.qlen = cl->q->q.qlen;
	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

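/*
 * Replace a class's leaf qdisc.  A NULL replacement means "revert to
 * the default pfifo"; the old qdisc is reset and its packets are
 * subtracted from the ancestors' queue lengths under the tree lock.
 */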
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			return -ENOBUFS;
	} else {
#ifdef CONFIG_NET_CLS_ACT
		if (cl->police == TC_POLICE_RECLASSIFY)
			new->reshape_fail = cbq_reshape_fail;
#endif
	}
	sch_tree_lock(sch);
	*old = cl->q;
	cl->q = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}

static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
}

static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}

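/*
 * Final teardown of one class.  The root class (q->link) is embedded
 * in the qdisc's private data, so everything it references is released
 * but the struct itself is not kfree()d.
 */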
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}

static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leafs which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

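/*
 * Drop a reference taken by cbq_get().  On the last put the class is
 * destroyed; rx_class is cleared under the root lock first so the
 * enqueue path cannot be left holding a stale pointer.
 */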
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}

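/*
 * Change an existing class (cl != NULL) or create a new one.  For new
 * classes the WRR, RATE and LSS options are mandatory; a missing
 * classid is auto-generated from the 0x8000+ minor range via
 * q->hgenerator.
 */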
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB]);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err) {
				qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_OVL_STRATEGY])
			cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));

#ifdef CONFIG_NET_CLS_ACT
		if (tb[TCA_CBQ_POLICE])
			cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid))
			goto failure;
	} else {
		int i;
		classid = TC_H_MAKE(sch->handle, 0x8000);

		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!cl->q)
		cl->q = &noop_qdisc;
	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	cl->overlimit = cbq_ovl_classic;
	if (tb[TCA_CBQ_OVL_STRATEGY])
		cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
#ifdef CONFIG_NET_CLS_ACT
	if (tb[TCA_CBQ_POLICE])
		cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}

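/*
 * Delete a class.  Refused while filters or children still reference
 * it; otherwise the class is emptied, unlinked from the WRR rings and
 * the hierarchy, and freed later by the final cbq_put().
 */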
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	unsigned int qlen;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qlen = cl->q->q.qlen;
	qdisc_reset(cl->q);
	qdisc_tree_decrease_qlen(cl->q, qlen);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	return 0;
}

static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return &cl->filter_list;
}

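/*
 * Bind a filter result to a class.  The level check refuses to bind to
 * a class that is not strictly below the filter's parent class in the
 * hierarchy, and cl->filters keeps the class pinned while bound.
 */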
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}

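/*
 * Walk every class for dumps, honouring the walker's skip/count
 * bookkeeping so a partial dump can resume where it left off.
 */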
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

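/*
 * Operation tables hooking CBQ into the qdisc core.  qdisc_peek_dequeued
 * serves as ->peek, since CBQ has no cheaper non-destructive peek of
 * its own.
 */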
static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_chain	=	cbq_find_tcf,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};

static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	cbq_drop,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");