/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------

	Algorithm skeleton was taken from NS simulator cbq.cc.
	If someone wants to check this code against the LBL version,
	he should take into account that ONLY the skeleton was borrowed;
	the implementation is different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than MTU, which is always the case when real time classes
	have small rates. Note that the statement of [3] is
	incomplete; delay may actually be estimated even if the class
	per-round allotment is less than MTU. Namely, if the per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1, then

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	In the worst case we have an IntServ estimate with D = W*r+k*MTU
	and C = MTU*r. The proof (if correct at all) is trivial.
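
	(Purely illustrative numbers, assumed rather than taken from the
	text above: with MTU = 1500 bytes, a per-round allotment
	W*r_i = 500 bytes for each of k = 3 classes, so W*r = 1500 bytes,
	r = 0.5, and a link of B = 1.25 Mbyte/s, the bound gives
	delay_i <= (3*1500 + 1500 + 3*1500)/1250000 s, roughly 8.4 ms.)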


	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places, which look like wrong translations
	from NS. Anyone is advised to find these differences
	and explain to me why I am wrong 8).

	--- Linux has no EOI event, so we cannot estimate true class
	idle time. The workaround is to consider the next dequeue event
	as a sign that the previous packet is finished. This is wrong because of
	internal device queueing, but on a permanently loaded link it is true.
	Moreover, combined with the clock integrator, this scheme looks
	very close to an ideal solution.  */

struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */
	unsigned char		ovl_strategy;
#ifdef CONFIG_NET_CLS_ACT
	unsigned char		police;
#endif

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* Overlimit strategy parameters */
	void			(*overlimit)(struct cbq_class *cl);
	psched_tdiff_t		penalty;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est64 rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto	*filter_list;

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};

struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;		/* Cached timestamp */
	unsigned int		pmask;

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit just now */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};

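/* L2T ("length to time"): convert a packet length into its transmission
 * time on this class, via the class's configured rate table.
 */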
#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)

static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif

/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link sharing rules (f.e. route based) at the root of CBQ,
 * so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */
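/* Purely illustrative example with assumed values (not from the comment
 * above): with qdisc handle 1:0, an skb whose skb->priority carries the
 * classid 1:10 is matched directly in Step 1 below. Otherwise the filters
 * attached to the current head (split) node are consulted; a result whose
 * major number is zero is treated as an index into that node's defaults[]
 * priority map.
 */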

static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_result res;

	/*
	 *  Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		/*
		 * Step 2+n. Apply classifier.
		 */
		if (!head->filter_list ||
		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link sharing class,
		 * apply agency specific classifier.
		 * Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}

/*
 * A packet has just been enqueued on the empty class.
 * cbq_activate_class adds it to the tail of active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}

/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}

static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
		psched_time_t now = psched_get_time();

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}

static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}

#ifdef CONFIG_NET_CLS_ACT
	cl->q->__parent = sch;
#endif
	ret = qdisc_enqueue(skb, cl->q);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}

/* Overlimit actions */

/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */

static void cbq_ovl_classic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * Class goes to sleep, so that it will have no
		 * chance to work off avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug in this
		 * place, apparently they forgot to shift it by cl->ewma_log.
		 */
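		/* Illustrative numbers (assumed, not from the comment above):
		 * with ewma_log = 5 and avgidle = -3200, the correction below
		 * shortens the delay by 3200 - (3200 >> 5) = 3100 ticks,
		 * i.e. by (1 - W) * (-avgidle).
		 */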
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * real available rate, rather than leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}

/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
 * they go overlimit
 */

static void cbq_ovl_rclassic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this = cl;

	do {
		if (cl->level > q->toplevel) {
			cl = NULL;
			break;
		}
	} while ((cl = cl->borrow) != NULL);

	if (cl == NULL)
		cl = this;
	cbq_ovl_classic(cl);
}

/* TC_CBQ_OVL_DELAY: delay until it will go to underlimit */

static void cbq_ovl_delay(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(cl->qdisc)->state))
		return;

	if (!cl->delayed) {
		psched_time_t sched = q->now;
		ktime_t expires;

		delay += cl->offtime;
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		cl->undertime = q->now + delay;

		if (delay > 0) {
			sched += delay + cl->penalty;
			cl->penalized = sched;
			cl->cpriority = TC_CBQ_MAXPRIO;
			q->pmask |= (1<<TC_CBQ_MAXPRIO);

			expires = ns_to_ktime(PSCHED_TICKS2NS(sched));
			if (hrtimer_try_to_cancel(&q->delay_timer) &&
			    ktime_to_ns(ktime_sub(
					hrtimer_get_expires(&q->delay_timer),
					expires)) > 0)
				hrtimer_set_expires(&q->delay_timer, expires);
			hrtimer_restart(&q->delay_timer);
			cl->delayed = 1;
			cl->xstats.overactions++;
			return;
		}
		delay = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;
}

/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */

static void cbq_ovl_lowprio(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	cl->penalized = q->now + cl->penalty;

	if (cl->cpriority != cl->priority2) {
		cl->cpriority = cl->priority2;
		q->pmask |= (1<<cl->cpriority);
		cl->xstats.overactions++;
	}
	cbq_ovl_classic(cl);
}

/* TC_CBQ_OVL_DROP: penalize class by dropping */

static void cbq_ovl_drop(struct cbq_class *cl)
{
	if (cl->q->ops->drop)
		if (cl->q->ops->drop(cl->q))
			cl->qdisc->q.qlen--;
	cl->xstats.overactions++;
	cbq_ovl_classic(cl);
549
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700550static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
551 psched_time_t now)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700552{
553 struct cbq_class *cl;
554 struct cbq_class *cl_prev = q->active[prio];
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700555 psched_time_t sched = now;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556
557 if (cl_prev == NULL)
Patrick McHardye9054a32007-03-16 01:21:40 -0700558 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559
560 do {
561 cl = cl_prev->next_alive;
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700562 if (now - cl->penalized > 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700563 cl_prev->next_alive = cl->next_alive;
564 cl->next_alive = NULL;
565 cl->cpriority = cl->priority;
566 cl->delayed = 0;
567 cbq_activate_class(cl);
568
569 if (cl == q->active[prio]) {
570 q->active[prio] = cl_prev;
571 if (cl == q->active[prio]) {
572 q->active[prio] = NULL;
573 return 0;
574 }
575 }
576
577 cl = cl_prev->next_alive;
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700578 } else if (sched - cl->penalized > 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579 sched = cl->penalized;
580 } while ((cl_prev = cl) != q->active[prio]);
581
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700582 return sched - now;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700583}
584
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700585static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586{
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700587 struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
David S. Miller2fbd3da2009-09-01 17:59:25 -0700588 delay_timer);
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700589 struct Qdisc *sch = q->watchdog.qdisc;
590 psched_time_t now;
591 psched_tdiff_t delay = 0;
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000592 unsigned int pmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593
Patrick McHardy3bebcda2007-03-23 11:29:25 -0700594 now = psched_get_time();
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700595
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596 pmask = q->pmask;
597 q->pmask = 0;
598
599 while (pmask) {
600 int prio = ffz(~pmask);
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700601 psched_tdiff_t tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700602
603 pmask &= ~(1<<prio);
604
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700605 tmp = cbq_undelay_prio(q, prio, now);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606 if (tmp > 0) {
607 q->pmask |= 1<<prio;
608 if (tmp < delay || delay == 0)
609 delay = tmp;
610 }
611 }
612
613 if (delay) {
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700614 ktime_t time;
615
616 time = ktime_set(0, 0);
Jarek Poplawskica44d6e2009-06-15 02:31:47 -0700617 time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
David S. Miller2fbd3da2009-09-01 17:59:25 -0700618 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700619 }
620
Eric Dumazetfd245a42011-01-20 05:27:16 +0000621 qdisc_unthrottled(sch);
David S. Miller8608db02008-08-18 20:51:18 -0700622 __netif_schedule(qdisc_root(sch));
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700623 return HRTIMER_NORESTART;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700624}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
	struct Qdisc *sch = child->__parent;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = q->rx_class;

	q->rx_class = NULL;

	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
		int ret;

		cbq_mark_toplevel(q, cl);

		q->rx_class = cl;
		cl->q->__parent = sch;

		ret = qdisc_enqueue(skb, cl->q);
		if (ret == NET_XMIT_SUCCESS) {
			sch->q.qlen++;
			if (!cl->next_alive)
				cbq_activate_class(cl);
			return 0;
		}
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return 0;
	}

	sch->qstats.drops++;
	return -1;
}
#endif

/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" toplevel cutoff, if transmitting class
 * has backlog and it is not regulated. It is not part of
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}

static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;
	psched_time_t now;

	q->tx_class = NULL;
	/* Time integrator. We calculate EOS time
	 * by adding expected packet transmission time.
	 */
	now = q->now + L2T(&q->link, len);

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is total time between packet right edges.
		 * (last_pktlen/rate) is "virtual" busy time, so that
		 *
		 * idle = (now - last) - last_pktlen/rate
		 */

		idle = now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

		/* true_avgidle := (1-W)*true_avgidle + W*idle,
		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
		 * cl->avgidle == true_avgidle/W,
		 * hence:
		 */
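			/* Illustrative values (assumed): with ewma_log = 5
			 * (W = 1/32), avgidle = 3200 and idle = 100, the
			 * update below gives 3200 + 100 - (3200 >> 5) = 3200,
			 * i.e. the scaled average is already at its fixed
			 * point for that idle time.
			 */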
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate expected time, when this class
			 * will be allowed to send.
			 * It will occur when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
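			/* Continuing the assumed numbers above: with
			 * avgidle = -320 and ewma_log = 5, this gives
			 * 320 - (320 >> 5) = 310 ticks before the class may
			 * send again, prior to the rate correction below.
			 */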
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime virtual clock,
			 * necessary to complete transmitted packet.
			 * (len/phys_bandwidth has been already passed
			 * to the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		if ((s64)(now - cl->last) > 0)
			cl->last = now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}

static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Now the overlimit
		 * action is generated for not bounded classes
		 * only if the link is completely congested.
		 * Though it is in agreement with the ancestor-only paradigm,
		 * it looks very stupid. Particularly,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and, however,
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			this_cl->overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}

static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0
			 * f.e. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}

static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);
		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}

static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;

	now = psched_get_time();

	if (q->tx_class)
		cbq_update(q);

	q->now = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * It is possible, if:
		 *
		 * 1. Scheduler is empty.
		 * 2. Toplevel cutoff inhibited borrowing.
		 * 3. Root class is overlimit.
		 *
		 * Reset conditions 2 and 3 and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy; peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for the toplevel algorithm.
		 *
		 * Our version is better, but slower, because it requires
		 * two passes, but it is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		sch->qstats.overlimits++;
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}
1004/* CBQ class maintanance routines */
1005
1006static void cbq_adjust_levels(struct cbq_class *this)
1007{
1008 if (this == NULL)
1009 return;
1010
1011 do {
1012 int level = 0;
1013 struct cbq_class *cl;
1014
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001015 cl = this->children;
1016 if (cl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017 do {
1018 if (cl->level > level)
1019 level = cl->level;
1020 } while ((cl = cl->sibling) != this->children);
1021 }
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001022 this->level = level + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023 } while ((this = this->tparent) != NULL);
1024}
1025
1026static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
1027{
1028 struct cbq_class *cl;
Patrick McHardyd77fea22008-07-05 23:22:05 -07001029 unsigned int h;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001030
1031 if (q->quanta[prio] == 0)
1032 return;
1033
Patrick McHardyd77fea22008-07-05 23:22:05 -07001034 for (h = 0; h < q->clhash.hashsize; h++) {
Sasha Levinb67bfe02013-02-27 17:06:00 -08001035 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036 /* BUGGGG... Beware! This expression suffer of
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001037 * arithmetic overflows!
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}

static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct cbq_class *c;

			hlist_for_each_entry(c, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}

static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}

static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}

static unsigned int cbq_drop(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl, *cl_head;
	int prio;
	unsigned int len;

	for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
		cl_head = q->active[prio];
		if (!cl_head)
			continue;

		cl = cl_head;
		do {
			if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
				sch->q.qlen--;
				if (!cl->q->q.qlen)
					cbq_deactivate_class(cl);
				return len;
			}
		} while ((cl = cl->next_alive) != cl_head);
	}
	return 0;
}

static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}


static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}

static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
{
	switch (ovl->strategy) {
	case TC_CBQ_OVL_CLASSIC:
		cl->overlimit = cbq_ovl_classic;
		break;
	case TC_CBQ_OVL_DELAY:
		cl->overlimit = cbq_ovl_delay;
		break;
	case TC_CBQ_OVL_LOWPRIO:
		if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
		    ovl->priority2 - 1 <= cl->priority)
			return -EINVAL;
		cl->priority2 = ovl->priority2 - 1;
		cl->overlimit = cbq_ovl_lowprio;
		break;
	case TC_CBQ_OVL_DROP:
		cl->overlimit = cbq_ovl_drop;
		break;
	case TC_CBQ_OVL_RCLASSIC:
		cl->overlimit = cbq_ovl_rclassic;
		break;
	default:
		return -EINVAL;
	}
	cl->penalty = ovl->penalty;
	return 0;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
	cl->police = p->police;

	if (cl->q->handle) {
		if (p->police == TC_POLICE_RECLASSIFY)
			cl->q->reshape_fail = cbq_reshape_fail;
		else
			cl->q->reshape_fail = NULL;
	}
	return 0;
}
#endif

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}

static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};

static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
		return -EINVAL;

	r = nla_data(tb[TCA_CBQ_RATE]);

	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_rtab;

	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle);
	if (!q->link.q)
		q->link.q = &noop_qdisc;

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
	q->link.overlimit = cbq_ovl_classic;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	q->delay_timer.function = cbq_undelay;
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}
1404
static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_ovl opt;

	opt.strategy = cl->ovl_strategy;
	opt.priority2 = cl->priority2 + 1;
	opt.pad = 0;
	opt.penalty = cl->penalty;
	if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_police opt;

	if (cl->police) {
		opt.police = cl->police;
		opt.__res1 = 0;
		opt.__res2 = 0;
		if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
#endif

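/*
 * Dump every configuration attribute of one class.  Shared by the qdisc
 * dump (for the root "link" class) and the per-class dump.
 */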
static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_ACT
	    cbq_dump_police(skb, cl) < 0 ||
#endif
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

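/*
 * Qdisc-level dump: nest the link class attributes inside TCA_OPTIONS.
 */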
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}

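/*
 * Dump one class: report its parent, handle and leaf qdisc, then nest
 * the class attributes inside TCA_OPTIONS.
 */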
static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class *)arg;
	struct nlattr *nest;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->common.classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->qstats.qlen = cl->q->q.qlen;
	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

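/*
 * Replace the leaf qdisc of a class.  A NULL replacement means "attach a
 * fresh default pfifo"; the old qdisc is drained under the tree lock and
 * handed back to the caller.
 */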
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			return -ENOBUFS;
	} else {
#ifdef CONFIG_NET_CLS_ACT
		if (cl->police == TC_POLICE_RECLASSIFY)
			new->reshape_fail = cbq_reshape_fail;
#endif
	}
	sch_tree_lock(sch);
	*old = cl->q;
	cl->q = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}

static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
}

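/*
 * Class reference counting: ->get() looks the class up and takes a
 * reference, ->put() (below) drops it again.
 */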
static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}

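/*
 * Free one class.  Its filters must already be gone; the embedded link
 * class belongs to the qdisc private area and is not kfree()d.
 */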
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}

static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leafs which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

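/*
 * Drop a reference taken by cbq_get(); the last put destroys the class,
 * clearing any cached rx_class pointer first.
 */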
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}

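/*
 * Create a new class or change an existing one (a non-NULL *arg selects
 * the change path).  New classes must carry at least the RATE, WRR and
 * LSS options.  For illustration only (exact syntax depends on the
 * iproute2 version), a typical userspace setup looks like:
 *
 *   tc qdisc add dev eth0 root handle 1: cbq bandwidth 100Mbit avpkt 1000
 *   tc class add dev eth0 parent 1: classid 1:1 cbq bandwidth 100Mbit \
 *        rate 10Mbit allot 1514 prio 5 avpkt 1000 bounded
 */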
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB]);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err) {
				qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_OVL_STRATEGY])
			cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));

#ifdef CONFIG_NET_CLS_ACT
		if (tb[TCA_CBQ_POLICE])
			cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid))
			goto failure;
	} else {
		int i;
		classid = TC_H_MAKE(sch->handle, 0x8000);

		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!cl->q)
		cl->q = &noop_qdisc;
	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	cl->overlimit = cbq_ovl_classic;
	if (tb[TCA_CBQ_OVL_STRATEGY])
		cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
#ifdef CONFIG_NET_CLS_ACT
	if (tb[TCA_CBQ_POLICE])
		cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}

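/*
 * Delete a class: refuse while filters or child classes still reference
 * it (and never delete the link class); otherwise unlink it under the
 * tree lock and let the final cops->put() free it.
 */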
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	unsigned int qlen;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qlen = cl->q->q.qlen;
	qdisc_reset(cl->q);
	qdisc_tree_decrease_qlen(cl->q, qlen);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	return 0;
}

static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return &cl->filter_list;
}

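/*
 * Bind a filter result to a class.  A filter attached at class 'parent'
 * may only select classes strictly below it in the hierarchy.
 */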
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}

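/*
 * Walk all classes in the hash, honouring the walker's skip/count/stop
 * bookkeeping.
 */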
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_chain	=	cbq_find_tcf,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};

static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	cbq_drop,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");