/*
 * net/sched/sch_cbq.c  Class-Based Queueing discipline.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/*      Class-Based Queueing (CBQ) algorithm.
        =======================================

        Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
                 Management Models for Packet Networks",
                 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

                 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

                 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
                 Parameters", 1996

                 [4] Sally Floyd and Michael Speer, "Experimental Results
                 for Class-Based Queueing", 1998, not published.

        -----------------------------------------------------------------------

        The algorithm skeleton was taken from the NS simulator cbq.cc.
        If someone wants to check this code against the LBL version,
        he should take into account that ONLY the skeleton was borrowed;
        the implementation is different. Particularly:

        --- The WRR algorithm is different. Our version looks more
        reasonable (I hope) and works when quanta are allowed to be
        less than the MTU, which is always the case when real-time classes
        have small rates. Note that the statement in [3] is
        incomplete: delay may actually be estimated even if the class
        per-round allotment is less than the MTU. Namely, if the per-round
        allotment is W*r_i, and r_1+...+r_k = r < 1, then

        delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

        In the worst case we have an IntServ estimate with D = W*r + k*MTU
        and C = MTU*r. The proof (if correct at all) is trivial.


        --- It seems that cbq-2.0 is not very accurate. At least, I cannot
        interpret some places, which look like wrong translations
        from NS. Anyone is advised to find these differences
        and explain to me why I am wrong 8).

        --- Linux has no EOI event, so we cannot estimate the true class
        idle time. The workaround is to consider the next dequeue event
        as a sign that the previous packet is finished. This is wrong because of
        internal device queueing, but on a permanently loaded link it is true.
        Moreover, combined with the clock integrator, this scheme looks
        very close to an ideal solution. */
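
/* For reference, in the delay bound above (spelling out the definitions
 * used in the text, not quoting [3] itself): W is the WRR scaling
 * constant, r_i is the fractional rate of class i over k classes with
 * r_1+...+r_k = r < 1, and B is the link bandwidth, so that W*r_i is
 * class i's per-round allotment in bytes.
 */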

struct cbq_sched_data;


struct cbq_class {
        struct Qdisc_class_common common;
        struct cbq_class        *next_alive;    /* next class with backlog in this priority band */

/* Parameters */
        unsigned char           priority;       /* class priority */
        unsigned char           priority2;      /* priority to be used after overlimit */
        unsigned char           ewma_log;       /* time constant for idle time calculation */

        u32                     defmap;

        /* Link-sharing scheduler parameters */
        long                    maxidle;        /* Class parameters: see below. */
        long                    offtime;
        long                    minidle;
        u32                     avpkt;
        struct qdisc_rate_table *R_tab;

        /* General scheduler (WRR) parameters */
        long                    allot;
        long                    quantum;        /* Allotment per WRR round */
        long                    weight;         /* Relative allotment: see below */

        struct Qdisc            *qdisc;         /* Ptr to CBQ discipline */
        struct cbq_class        *split;         /* Ptr to split node */
        struct cbq_class        *share;         /* Ptr to LS parent in the class tree */
        struct cbq_class        *tparent;       /* Ptr to tree parent in the class tree */
        struct cbq_class        *borrow;        /* NULL if class is bandwidth limited;
                                                   parent otherwise */
        struct cbq_class        *sibling;       /* Sibling chain */
        struct cbq_class        *children;      /* Pointer to children chain */

        struct Qdisc            *q;             /* Elementary queueing discipline */


/* Variables */
        unsigned char           cpriority;      /* Effective priority */
        unsigned char           delayed;
        unsigned char           level;          /* level of the class in hierarchy:
                                                   0 for leaf classes, and maximal
                                                   level of children + 1 for nodes.
                                                 */

        psched_time_t           last;           /* Last end of service */
        psched_time_t           undertime;
        long                    avgidle;
        long                    deficit;        /* Saved deficit for WRR */
        psched_time_t           penalized;
        struct gnet_stats_basic_packed bstats;
        struct gnet_stats_queue qstats;
        struct net_rate_estimator __rcu *rate_est;
        struct tc_cbq_xstats    xstats;

        struct tcf_proto __rcu  *filter_list;

        int                     refcnt;
        int                     filters;

        struct cbq_class        *defaults[TC_PRIO_MAX + 1];
};

struct cbq_sched_data {
        struct Qdisc_class_hash clhash;                 /* Hash table of all classes */
        int                     nclasses[TC_CBQ_MAXPRIO + 1];
        unsigned int            quanta[TC_CBQ_MAXPRIO + 1];

        struct cbq_class        link;

        unsigned int            activemask;
        struct cbq_class        *active[TC_CBQ_MAXPRIO + 1];    /* List of all classes
                                                                   with backlog */

#ifdef CONFIG_NET_CLS_ACT
        struct cbq_class        *rx_class;
#endif
        struct cbq_class        *tx_class;
        struct cbq_class        *tx_borrowed;
        int                     tx_len;
        psched_time_t           now;                    /* Cached timestamp */
        unsigned int            pmask;

        struct hrtimer          delay_timer;
        struct qdisc_watchdog   watchdog;       /* Watchdog timer,
                                                   started when CBQ has
                                                   backlog, but cannot
                                                   transmit just now */
        psched_tdiff_t          wd_expires;
        int                     toplevel;
        u32                     hgenerator;
};


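/* L2T ("length to time"): look up, in the class's precomputed rate table,
 * the time (in psched ticks) needed to transmit a packet of len bytes at
 * the class's configured rate.
 */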
#define L2T(cl, len)    qdisc_l2t((cl)->R_tab, len)

static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
        struct Qdisc_class_common *clc;

        clc = qdisc_class_find(&q->clhash, classid);
        if (clc == NULL)
                return NULL;
        return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

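/* Walk up the tree from @this and return the first ancestor's best-effort
 * default class, provided it differs from the class we started at.
 */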
static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
        struct cbq_class *cl;

        for (cl = this->tparent; cl; cl = cl->tparent) {
                struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

                if (new != NULL && new != this)
                        return new;
        }
        return NULL;
}

#endif

/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link-sharing rules (e.g. route based) at the root
 * of CBQ, so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */

static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *head = &q->link;
        struct cbq_class **defmap;
        struct cbq_class *cl = NULL;
        u32 prio = skb->priority;
        struct tcf_proto *fl;
        struct tcf_result res;

        /*
         * Step 1. If skb->priority points to one of our classes, use it.
         */
        if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
            (cl = cbq_class_lookup(q, prio)) != NULL)
                return cl;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        for (;;) {
                int result = 0;
                defmap = head->defaults;

                fl = rcu_dereference_bh(head->filter_list);
                /*
                 * Step 2+n. Apply classifier.
                 */
                result = tc_classify(skb, fl, &res, true);
                if (!fl || result < 0)
                        goto fallback;

                cl = (void *)res.class;
                if (!cl) {
                        if (TC_H_MAJ(res.classid))
                                cl = cbq_class_lookup(q, res.classid);
                        else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
                                cl = defmap[TC_PRIO_BESTEFFORT];

                        if (cl == NULL)
                                goto fallback;
                }
                if (cl->level >= head->level)
                        goto fallback;
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                case TC_ACT_RECLASSIFY:
                        return cbq_reclassify(skb, cl);
                }
#endif
                if (cl->level == 0)
                        return cl;

                /*
                 * Step 3+n. If the classifier selected a link-sharing class,
                 * apply the agency-specific classifier.
                 * Repeat this procedure until we hit a leaf node.
                 */
                head = cl;
        }

fallback:
        cl = head;

        /*
         * Step 4. No success...
         */
        if (TC_H_MAJ(prio) == 0 &&
            !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
            !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
                return head;

        return cl;
}

/*
 * A packet has just been enqueued on an empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        int prio = cl->cpriority;
        struct cbq_class *cl_tail;

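        /* q->active[prio] points at the tail of a circular singly linked
         * list; splice the class in after the old tail and make it the
         * new tail.
         */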
        cl_tail = q->active[prio];
        q->active[prio] = cl;

        if (cl_tail != NULL) {
                cl->next_alive = cl_tail->next_alive;
                cl_tail->next_alive = cl;
        } else {
                cl->next_alive = cl;
                q->activemask |= (1<<prio);
        }
}

/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during the round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
        struct cbq_sched_data *q = qdisc_priv(this->qdisc);
        int prio = this->cpriority;
        struct cbq_class *cl;
        struct cbq_class *cl_prev = q->active[prio];

        do {
                cl = cl_prev->next_alive;
                if (cl == this) {
                        cl_prev->next_alive = cl->next_alive;
                        cl->next_alive = NULL;

                        if (cl == q->active[prio]) {
                                q->active[prio] = cl_prev;
                                if (cl == q->active[prio]) {
                                        q->active[prio] = NULL;
                                        q->activemask &= ~(1<<prio);
                                        return;
                                }
                        }
                        return;
                }
        } while ((cl_prev = cl) != q->active[prio]);
}

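/* A packet was just enqueued to class @cl. If @cl or an underlimit
 * ancestor on its borrow chain sits below the current toplevel cutoff,
 * lower the cutoff to that class's level so borrowing there is allowed
 * again.
 */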
static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
        int toplevel = q->toplevel;

        if (toplevel > cl->level) {
                psched_time_t now = psched_get_time();

                do {
                        if (cl->undertime < now) {
                                q->toplevel = cl->level;
                                return;
                        }
                } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
        }
}

static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
            struct sk_buff **to_free)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        int uninitialized_var(ret);
        struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
        q->rx_class = cl;
#endif
        if (cl == NULL) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
        }

        ret = qdisc_enqueue(skb, cl->q, to_free);
        if (ret == NET_XMIT_SUCCESS) {
                sch->q.qlen++;
                cbq_mark_toplevel(q, cl);
                if (!cl->next_alive)
                        cbq_activate_class(cl);
                return ret;
        }

        if (net_xmit_drop_count(ret)) {
                qdisc_qstats_drop(sch);
                cbq_mark_toplevel(q, cl);
                cl->qstats.drops++;
        }
        return ret;
}

/* Overlimit action: penalize leaf class by adding offtime */
static void cbq_overlimit(struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        psched_tdiff_t delay = cl->undertime - q->now;

        if (!cl->delayed) {
                delay += cl->offtime;

                /*
                 * The class goes to sleep, so that it will have no
                 * chance to work off avgidle. Let's forgive it 8)
                 *
                 * BTW cbq-2.0 has a bug in this
                 * place, apparently they forgot to shift it by cl->ewma_log.
                 */
                if (cl->avgidle < 0)
                        delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
                if (cl->avgidle < cl->minidle)
                        cl->avgidle = cl->minidle;
                if (delay <= 0)
                        delay = 1;
                cl->undertime = q->now + delay;

                cl->xstats.overactions++;
                cl->delayed = 1;
        }
        if (q->wd_expires == 0 || q->wd_expires > delay)
                q->wd_expires = delay;

        /* Dirty work! We must schedule wakeups based on
         * the real available rate, rather than the leaf rate,
         * which may be tiny (even zero).
         */
        if (q->toplevel == TC_CBQ_MAXLEVEL) {
                struct cbq_class *b;
                psched_tdiff_t base_delay = q->wd_expires;

                for (b = cl->borrow; b; b = b->borrow) {
                        delay = b->undertime - q->now;
                        if (delay < base_delay) {
                                if (delay <= 0)
                                        delay = 1;
                                base_delay = delay;
                        }
                }

                q->wd_expires = base_delay;
        }
}

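/* Scan one priority band's active list and re-activate classes whose
 * penalty period has expired; return the time (in psched ticks) until
 * the next class in the band becomes eligible, or 0 if none is pending.
 */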
static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
                                       psched_time_t now)
{
        struct cbq_class *cl;
        struct cbq_class *cl_prev = q->active[prio];
        psched_time_t sched = now;

        if (cl_prev == NULL)
                return 0;

        do {
                cl = cl_prev->next_alive;
                if (now - cl->penalized > 0) {
                        cl_prev->next_alive = cl->next_alive;
                        cl->next_alive = NULL;
                        cl->cpriority = cl->priority;
                        cl->delayed = 0;
                        cbq_activate_class(cl);

                        if (cl == q->active[prio]) {
                                q->active[prio] = cl_prev;
                                if (cl == q->active[prio]) {
                                        q->active[prio] = NULL;
                                        return 0;
                                }
                        }

                        cl = cl_prev->next_alive;
                } else if (sched - cl->penalized > 0)
                        sched = cl->penalized;
        } while ((cl_prev = cl) != q->active[prio]);

        return sched - now;
}

static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
        struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
                                                delay_timer);
        struct Qdisc *sch = q->watchdog.qdisc;
        psched_time_t now;
        psched_tdiff_t delay = 0;
        unsigned int pmask;

        now = psched_get_time();

        pmask = q->pmask;
        q->pmask = 0;

        while (pmask) {
                int prio = ffz(~pmask);
                psched_tdiff_t tmp;

                pmask &= ~(1<<prio);

                tmp = cbq_undelay_prio(q, prio, now);
                if (tmp > 0) {
                        q->pmask |= 1<<prio;
                        if (tmp < delay || delay == 0)
                                delay = tmp;
                }
        }

        if (delay) {
                ktime_t time;

                time = 0;
                time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
                hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
        }

        __netif_schedule(qdisc_root(sch));
        return HRTIMER_NORESTART;
}

/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" the toplevel cutoff if the transmitting class
 * has backlog and is not regulated. It is not part of the
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
                    struct cbq_class *borrowed)
{
        if (cl && q->toplevel >= borrowed->level) {
                if (cl->q->q.qlen > 1) {
                        do {
                                if (borrowed->undertime == PSCHED_PASTPERFECT) {
                                        q->toplevel = borrowed->level;
                                        return;
                                }
                        } while ((borrowed = borrowed->borrow) != NULL);
                }
#if 0
        /* It is not necessary now. Uncommenting it
           will save CPU cycles, but decrease fairness.
         */
        q->toplevel = TC_CBQ_MAXLEVEL;
#endif
        }
}

static void
cbq_update(struct cbq_sched_data *q)
{
        struct cbq_class *this = q->tx_class;
        struct cbq_class *cl = this;
        int len = q->tx_len;
        psched_time_t now;

        q->tx_class = NULL;
        /* Time integrator. We calculate EOS time
         * by adding the expected packet transmission time.
         */
        now = q->now + L2T(&q->link, len);

        for ( ; cl; cl = cl->share) {
                long avgidle = cl->avgidle;
                long idle;

                cl->bstats.packets++;
                cl->bstats.bytes += len;

                /*
                 * (now - last) is the total time between packet right edges.
                 * (last_pktlen/rate) is the "virtual" busy time, so that
                 *
                 *      idle = (now - last) - last_pktlen/rate
                 */

                idle = now - cl->last;
                if ((unsigned long)idle > 128*1024*1024) {
                        avgidle = cl->maxidle;
                } else {
                        idle -= L2T(cl, len);

                        /* true_avgidle := (1-W)*true_avgidle + W*idle,
                         * where W=2^{-ewma_log}. But cl->avgidle is scaled:
                         * cl->avgidle == true_avgidle/W,
                         * hence:
                         */
                        avgidle += idle - (avgidle>>cl->ewma_log);
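
                        /* Worked example: with ewma_log == 5 (W == 1/32),
                         * true_avgidle' = (31/32)*true_avgidle + (1/32)*idle
                         * becomes, in the scaled form used here,
                         * avgidle += idle - avgidle/32.
                         */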
                }

                if (avgidle <= 0) {
                        /* Overlimit or at-limit */

                        if (avgidle < cl->minidle)
                                avgidle = cl->minidle;

                        cl->avgidle = avgidle;

                        /* Calculate the expected time when this class
                         * will be allowed to send.
                         * It will occur when:
                         * (1-W)*true_avgidle + W*delay = 0, i.e.
                         * idle = (1/W - 1)*(-true_avgidle)
                         * or
                         * idle = (1 - W)*(-cl->avgidle);
                         */
                        idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

                        /*
                         * That is not all.
                         * To maintain the rate allocated to the class,
                         * we add to undertime the virtual clock time
                         * necessary to complete the transmitted packet.
                         * (len/phys_bandwidth has already elapsed
                         * by the moment cbq_update is called.)
                         */

                        idle -= L2T(&q->link, len);
                        idle += L2T(cl, len);

                        cl->undertime = now + idle;
                } else {
                        /* Underlimit */

                        cl->undertime = PSCHED_PASTPERFECT;
                        if (avgidle > cl->maxidle)
                                cl->avgidle = cl->maxidle;
                        else
                                cl->avgidle = avgidle;
                }
                if ((s64)(now - cl->last) > 0)
                        cl->last = now;
        }

        cbq_update_toplevel(q, this, q->tx_borrowed);
}

static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        struct cbq_class *this_cl = cl;

        if (cl->tparent == NULL)
                return cl;

        if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
                cl->delayed = 0;
                return cl;
        }

        do {
                /* This is a very suspicious place. Now the overlimit
                 * action is generated for non-bounded classes
                 * only if the link is completely congested.
                 * Though it agrees with the ancestor-only paradigm,
                 * it looks very stupid. Particularly,
                 * it means that this chunk of code will either
                 * never be called or result in strong amplification
                 * of burstiness. Dangerous, silly, and, however,
                 * no other solution exists.
                 */
                cl = cl->borrow;
                if (!cl) {
                        this_cl->qstats.overlimits++;
                        cbq_overlimit(this_cl);
                        return NULL;
                }
                if (cl->level > q->toplevel)
                        return NULL;
        } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

        cl->delayed = 0;
        return cl;
}

static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl_tail, *cl_prev, *cl;
        struct sk_buff *skb;
        int deficit;

        cl_tail = cl_prev = q->active[prio];
        cl = cl_prev->next_alive;

        do {
                deficit = 0;

                /* Start round */
                do {
                        struct cbq_class *borrow = cl;

                        if (cl->q->q.qlen &&
                            (borrow = cbq_under_limit(cl)) == NULL)
                                goto skip_class;

                        if (cl->deficit <= 0) {
                                /* Class exhausted its allotment per
                                 * this round. Switch to the next one.
                                 */
                                deficit = 1;
                                cl->deficit += cl->quantum;
                                goto next_class;
                        }

                        skb = cl->q->dequeue(cl->q);

                        /* Class did not give us any skb :-(
                         * It could occur even if cl->q->q.qlen != 0,
                         * e.g. if cl->q == "tbf"
                         */
                        if (skb == NULL)
                                goto skip_class;

                        cl->deficit -= qdisc_pkt_len(skb);
                        q->tx_class = cl;
                        q->tx_borrowed = borrow;
                        if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
                                borrow->xstats.borrows++;
                                cl->xstats.borrows++;
#else
                                borrow->xstats.borrows += qdisc_pkt_len(skb);
                                cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
                        }
                        q->tx_len = qdisc_pkt_len(skb);

                        if (cl->deficit <= 0) {
                                q->active[prio] = cl;
                                cl = cl->next_alive;
                                cl->deficit += cl->quantum;
                        }
                        return skb;

skip_class:
                        if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
                                /* Class is empty or penalized.
                                 * Unlink it from the active chain.
                                 */
                                cl_prev->next_alive = cl->next_alive;
                                cl->next_alive = NULL;

                                /* Did cl_tail point to it? */
                                if (cl == cl_tail) {
                                        /* Repair it! */
                                        cl_tail = cl_prev;

                                        /* Was it the last class in this band? */
                                        if (cl == cl_tail) {
                                                /* Kill the band! */
                                                q->active[prio] = NULL;
                                                q->activemask &= ~(1<<prio);
                                                if (cl->q->q.qlen)
                                                        cbq_activate_class(cl);
                                                return NULL;
                                        }

                                        q->active[prio] = cl_tail;
                                }
                                if (cl->q->q.qlen)
                                        cbq_activate_class(cl);

                                cl = cl_prev;
                        }

next_class:
                        cl_prev = cl;
                        cl = cl->next_alive;
                } while (cl_prev != cl_tail);
        } while (deficit);

        q->active[prio] = cl_prev;

        return NULL;
}

static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int activemask;

        activemask = q->activemask & 0xFF;
        while (activemask) {
                int prio = ffz(~activemask);
                activemask &= ~(1<<prio);
                skb = cbq_dequeue_prio(sch, prio);
                if (skb)
                        return skb;
        }
        return NULL;
}

static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct cbq_sched_data *q = qdisc_priv(sch);
        psched_time_t now;

        now = psched_get_time();

        if (q->tx_class)
                cbq_update(q);

        q->now = now;

        for (;;) {
                q->wd_expires = 0;

                skb = cbq_dequeue_1(sch);
                if (skb) {
                        qdisc_bstats_update(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }

                /* All the classes are overlimit.
                 *
                 * It is possible, if:
                 *
                 * 1. Scheduler is empty.
                 * 2. Toplevel cutoff inhibited borrowing.
                 * 3. Root class is overlimit.
                 *
                 * Reset conditions 2 and 3 and retry.
                 *
                 * Note that NS and cbq-2.0 are buggy: peeking
                 * an arbitrary class is appropriate for ancestor-only
                 * sharing, but not for the toplevel algorithm.
                 *
                 * Our version is better but slower, because it requires
                 * two passes; this is unavoidable with top-level sharing.
                 */

                if (q->toplevel == TC_CBQ_MAXLEVEL &&
                    q->link.undertime == PSCHED_PASTPERFECT)
                        break;

                q->toplevel = TC_CBQ_MAXLEVEL;
                q->link.undertime = PSCHED_PASTPERFECT;
        }

        /* No packets in scheduler or nobody wants to give them to us :-(
         * Sigh... start the watchdog timer in the latter case.
         */

        if (sch->q.qlen) {
                qdisc_qstats_overlimit(sch);
                if (q->wd_expires)
                        qdisc_watchdog_schedule(&q->watchdog,
                                                now + q->wd_expires);
        }
        return NULL;
}

/* CBQ class maintenance routines */

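/* Recompute this->level as 1 + the maximum level among its children,
 * then propagate the change up the tparent chain to the root.
 */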
static void cbq_adjust_levels(struct cbq_class *this)
{
        if (this == NULL)
                return;

        do {
                int level = 0;
                struct cbq_class *cl;

                cl = this->children;
                if (cl) {
                        do {
                                if (cl->level > level)
                                        level = cl->level;
                        } while ((cl = cl->sibling) != this->children);
                }
                this->level = level + 1;
        } while ((this = this->tparent) != NULL);
}

static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
        struct cbq_class *cl;
        unsigned int h;

        if (q->quanta[prio] == 0)
                return;

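        /* q->quanta[prio] holds the sum of the weights in this priority
         * band, so each class ends up with quantum = allot * weight /
         * (average weight in the band): a class of average weight sends
         * exactly its allot per round.
         */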
        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        /* BUGGGG... Beware! This expression suffers from
                         * arithmetic overflows!
                         */
                        if (cl->priority == prio) {
                                cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
                                        q->quanta[prio];
                        }
                        if (cl->quantum <= 0 ||
                            cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
                                pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
                                        cl->common.classid, cl->quantum);
                                cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
                        }
                }
        }
}

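/* Rebuild the split node's defaults[] table: for every priority bit set
 * in some descendant's defmap, pick the matching class of lowest level.
 */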
static void cbq_sync_defmap(struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        struct cbq_class *split = cl->split;
        unsigned int h;
        int i;

        if (split == NULL)
                return;

        for (i = 0; i <= TC_PRIO_MAX; i++) {
                if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
                        split->defaults[i] = NULL;
        }

        for (i = 0; i <= TC_PRIO_MAX; i++) {
                int level = split->level;

                if (split->defaults[i])
                        continue;

                for (h = 0; h < q->clhash.hashsize; h++) {
                        struct cbq_class *c;

                        hlist_for_each_entry(c, &q->clhash.hash[h],
                                             common.hnode) {
                                if (c->split == split && c->level < level &&
                                    c->defmap & (1<<i)) {
                                        split->defaults[i] = c;
                                        level = c->level;
                                }
                        }
                }
        }
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
        struct cbq_class *split = NULL;

        if (splitid == 0) {
                split = cl->split;
                if (!split)
                        return;
                splitid = split->common.classid;
        }

        if (split == NULL || split->common.classid != splitid) {
                for (split = cl->tparent; split; split = split->tparent)
                        if (split->common.classid == splitid)
                                break;
        }

        if (split == NULL)
                return;

        if (cl->split != split) {
                cl->defmap = 0;
                cbq_sync_defmap(cl);
                cl->split = split;
                cl->defmap = def & mask;
        } else
                cl->defmap = (cl->defmap & ~mask) | (def & mask);

        cbq_sync_defmap(cl);
}

static void cbq_unlink_class(struct cbq_class *this)
{
        struct cbq_class *cl, **clp;
        struct cbq_sched_data *q = qdisc_priv(this->qdisc);

        qdisc_class_hash_remove(&q->clhash, &this->common);

        if (this->tparent) {
                clp = &this->sibling;
                cl = *clp;
                do {
                        if (cl == this) {
                                *clp = cl->sibling;
                                break;
                        }
                        clp = &cl->sibling;
                } while ((cl = *clp) != this->sibling);

                if (this->tparent->children == this) {
                        this->tparent->children = this->sibling;
                        if (this->sibling == this)
                                this->tparent->children = NULL;
                }
        } else {
                WARN_ON(this->sibling != this);
        }
}

static void cbq_link_class(struct cbq_class *this)
{
        struct cbq_sched_data *q = qdisc_priv(this->qdisc);
        struct cbq_class *parent = this->tparent;

        this->sibling = this;
        qdisc_class_hash_insert(&q->clhash, &this->common);

        if (parent == NULL)
                return;

        if (parent->children == NULL) {
                parent->children = this;
        } else {
                this->sibling = parent->children->sibling;
                parent->children->sibling = this;
        }
}

static void
cbq_reset(struct Qdisc *sch)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl;
        int prio;
        unsigned int h;

        q->activemask = 0;
        q->pmask = 0;
        q->tx_class = NULL;
        q->tx_borrowed = NULL;
        qdisc_watchdog_cancel(&q->watchdog);
        hrtimer_cancel(&q->delay_timer);
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();

        for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
                q->active[prio] = NULL;

        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        qdisc_reset(cl->q);

                        cl->next_alive = NULL;
                        cl->undertime = PSCHED_PASTPERFECT;
                        cl->avgidle = cl->maxidle;
                        cl->deficit = cl->quantum;
                        cl->cpriority = cl->priority;
                }
        }
        sch->q.qlen = 0;
}


static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
        if (lss->change & TCF_CBQ_LSS_FLAGS) {
                cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
                cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
        }
        if (lss->change & TCF_CBQ_LSS_EWMA)
                cl->ewma_log = lss->ewma_log;
        if (lss->change & TCF_CBQ_LSS_AVPKT)
                cl->avpkt = lss->avpkt;
        if (lss->change & TCF_CBQ_LSS_MINIDLE)
                cl->minidle = -(long)lss->minidle;
        if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
                cl->maxidle = lss->maxidle;
                cl->avgidle = lss->maxidle;
        }
        if (lss->change & TCF_CBQ_LSS_OFFTIME)
                cl->offtime = lss->offtime;
        return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
        q->nclasses[cl->priority]--;
        q->quanta[cl->priority] -= cl->weight;
        cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
        q->nclasses[cl->priority]++;
        q->quanta[cl->priority] += cl->weight;
        cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

        if (wrr->allot)
                cl->allot = wrr->allot;
        if (wrr->weight)
                cl->weight = wrr->weight;
        if (wrr->priority) {
                cl->priority = wrr->priority - 1;
                cl->cpriority = cl->priority;
                if (cl->priority >= cl->priority2)
                        cl->priority2 = TC_CBQ_MAXPRIO - 1;
        }

        cbq_addprio(q, cl);
        return 0;
}

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
        cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
        return 0;
}

static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
        [TCA_CBQ_LSSOPT]        = { .len = sizeof(struct tc_cbq_lssopt) },
        [TCA_CBQ_WRROPT]        = { .len = sizeof(struct tc_cbq_wrropt) },
        [TCA_CBQ_FOPT]          = { .len = sizeof(struct tc_cbq_fopt) },
        [TCA_CBQ_OVL_STRATEGY]  = { .len = sizeof(struct tc_cbq_ovl) },
        [TCA_CBQ_RATE]          = { .len = sizeof(struct tc_ratespec) },
        [TCA_CBQ_RTAB]          = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_CBQ_POLICE]        = { .len = sizeof(struct tc_cbq_police) },
};

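/* Illustrative userspace setup (iproute2 tc syntax; the device name and
 * numbers are arbitrary examples, not recommendations):
 *
 *   tc qdisc add dev eth0 root handle 1: cbq bandwidth 100Mbit avpkt 1000
 *   tc class add dev eth0 parent 1: classid 1:1 cbq bandwidth 100Mbit \
 *        rate 6Mbit weight 600Kbit allot 1514 prio 5 avpkt 1000 bounded
 *
 * tc computes the rate table and passes it as TCA_CBQ_RTAB alongside
 * TCA_CBQ_RATE; cbq_init() below consumes both for the root "link" class.
 */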
static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CBQ_MAX + 1];
        struct tc_ratespec *r;
        int err;

        err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
        if (err < 0)
                return err;

        if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
                return -EINVAL;

        r = nla_data(tb[TCA_CBQ_RATE]);

        if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
                return -EINVAL;

        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
                goto put_rtab;

        q->link.refcnt = 1;
        q->link.sibling = &q->link;
        q->link.common.classid = sch->handle;
        q->link.qdisc = sch;
        q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                      sch->handle);
        if (!q->link.q)
                q->link.q = &noop_qdisc;

        q->link.priority = TC_CBQ_MAXPRIO - 1;
        q->link.priority2 = TC_CBQ_MAXPRIO - 1;
        q->link.cpriority = TC_CBQ_MAXPRIO - 1;
        q->link.allot = psched_mtu(qdisc_dev(sch));
        q->link.quantum = q->link.allot;
        q->link.weight = q->link.R_tab->rate.rate;

        q->link.ewma_log = TC_CBQ_DEF_EWMA;
        q->link.avpkt = q->link.allot/2;
        q->link.minidle = -0x7FFFFFFF;

        qdisc_watchdog_init(&q->watchdog, sch);
        hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        q->delay_timer.function = cbq_undelay;
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();

        cbq_link_class(&q->link);

        if (tb[TCA_CBQ_LSSOPT])
                cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

        cbq_addprio(q, &q->link);
        return 0;

put_rtab:
        qdisc_put_rtab(q->link.R_tab);
        return err;
}

static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
        unsigned char *b = skb_tail_pointer(skb);

        if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_lssopt opt;

        opt.flags = 0;
        if (cl->borrow == NULL)
                opt.flags |= TCF_CBQ_LSS_BOUNDED;
        if (cl->share == NULL)
                opt.flags |= TCF_CBQ_LSS_ISOLATED;
        opt.ewma_log = cl->ewma_log;
        opt.level = cl->level;
        opt.avpkt = cl->avpkt;
        opt.maxidle = cl->maxidle;
        opt.minidle = (u32)(-cl->minidle);
        opt.offtime = cl->offtime;
        opt.change = ~0;
        if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_wrropt opt;

        memset(&opt, 0, sizeof(opt));
        opt.flags = 0;
        opt.allot = cl->allot;
        opt.priority = cl->priority + 1;
        opt.cpriority = cl->cpriority + 1;
        opt.weight = cl->weight;
        if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_fopt opt;

        if (cl->split || cl->defmap) {
                opt.split = cl->split ? cl->split->common.classid : 0;
                opt.defmap = cl->defmap;
                opt.defchange = ~0;
                if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
                        goto nla_put_failure;
        }
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
        if (cbq_dump_lss(skb, cl) < 0 ||
            cbq_dump_rate(skb, cl) < 0 ||
            cbq_dump_wrr(skb, cl) < 0 ||
            cbq_dump_fopt(skb, cl) < 0)
                return -1;
        return 0;
}

static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct nlattr *nest;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (cbq_dump_attr(skb, &q->link) < 0)
                goto nla_put_failure;
        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct cbq_sched_data *q = qdisc_priv(sch);

        q->link.xstats.avgidle = q->link.avgidle;
        return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}

static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
               struct sk_buff *skb, struct tcmsg *tcm)
{
        struct cbq_class *cl = (struct cbq_class *)arg;
        struct nlattr *nest;

        if (cl->tparent)
                tcm->tcm_parent = cl->tparent->common.classid;
        else
                tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle = cl->common.classid;
        tcm->tcm_info = cl->q->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (cbq_dump_attr(skb, cl) < 0)
                goto nla_put_failure;
        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                     struct gnet_dump *d)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;

        cl->xstats.avgidle = cl->avgidle;
        cl->xstats.undertime = 0;

        if (cl->undertime != PSCHED_PASTPERFECT)
                cl->xstats.undertime = cl->undertime - q->now;

        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
                return -1;

        return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old)
{
        struct cbq_class *cl = (struct cbq_class *)arg;

        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue,
                                        &pfifo_qdisc_ops, cl->common.classid);
                if (new == NULL)
                        return -ENOBUFS;
        }

        *old = qdisc_replace(sch, new, &cl->q);
        return 0;
}

Eric Dumazetcc7ec452011-01-19 19:26:56 +00001372static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373{
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001374 struct cbq_class *cl = (struct cbq_class *)arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001376 return cl->q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377}
1378
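/* A child queue became empty: take the class off the active list. */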
static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
}

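/* Look up a class by classid and take a reference on it. */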
static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}

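/*
 * Free everything a class owns: its filter chain, leaf qdisc, rate
 * table and rate estimator. The embedded link class itself is not freed.
 */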
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}

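/* Tear down the whole qdisc: all filters first, then every class. */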
static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because classes are not
	 * destroyed from root to leaves, so a filter may still be
	 * bound to a class that has already been destroyed. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

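/* Release a reference taken by cbq_get(); the last put destroys the class. */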
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}

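/*
 * Create a new class or change an existing one. With a non-NULL *arg the
 * class already exists and only the supplied attributes are updated;
 * otherwise a classid is chosen (or validated) and a new class is linked
 * under the requested parent.
 */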
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
		return -EOPNOTSUPP;

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB]);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		return 0;
	}

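	/* No existing class: the remainder of the function creates one. */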
	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid))
			goto failure;
	} else {
		int i;
		classid = TC_H_MAKE(sch->handle, 0x8000);

		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!cl->q)
		cl->q = &noop_qdisc;
	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}

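/*
 * Remove a class that has no children and no filters bound to it: flush
 * its queue, clear any transmit-state references and unlink it from the
 * hierarchy. The final destroy happens later via cops->put().
 */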
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	unsigned int qlen, backlog;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qlen = cl->q->q.qlen;
	backlog = cl->q->qstats.backlog;
	qdisc_reset(cl->q);
	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	/*
	 * The refcount must not drop to zero here: we "hold" one
	 * cops->get() when called from tc_ctl_tclass; the destroy
	 * method is done from cops->put().
	 */
	BUG_ON(--cl->refcnt == 0);

	return 0;
}

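/* Return the filter list of a class (the link class when arg is 0). */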
static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
					     unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return &cl->filter_list;
}

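/*
 * Bind a filter to a class, refusing targets whose level is not strictly
 * below the parent's; the filter count blocks deletion while non-zero.
 */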
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}

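/* Iterate over all classes, honouring the walker's skip/stop protocol. */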
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_chain	=	cbq_find_tcf,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};

static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");
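
/*
 * Illustrative configuration from user space (a sketch only, assuming the
 * iproute2 "tc" utility; device name and rates are made-up examples):
 *
 *   # attach CBQ as the root qdisc of eth0
 *   tc qdisc add dev eth0 root handle 1: cbq \
 *           bandwidth 100Mbit avpkt 1000
 *
 *   # add a bounded 6Mbit class below the link class
 *   tc class add dev eth0 parent 1: classid 1:1 cbq \
 *           bandwidth 100Mbit rate 6Mbit allot 1514 prio 5 \
 *           avpkt 1000 bounded
 *
 * The class change/create path above (cbq_change_class) is what receives
 * the TCA_CBQ_RATE/LSSOPT/WRROPT attributes these commands generate.
 */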