/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------

	The algorithm skeleton was taken from the NS simulator cbq.cc.
	Anyone checking this code against the LBL version should take into
	account that ONLY the skeleton was borrowed; the implementation is
	different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than MTU, which is always the case when real-time classes
	have small rates. Note that the statement of [3] is
	incomplete; delay may actually be estimated even if the class
	per-round allotment is less than MTU. Namely, if the per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	In the worst case we have an IntServ estimate with D = W*r+k*MTU
	and C = MTU*r. The proof (if correct at all) is trivial.


	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places, which look like wrong translations
	from NS. Anyone is advised to find these differences
	and explain to me, why I am wrong 8).

	--- Linux has no EOI event, so that we cannot estimate true class
	idle time. The workaround is to consider the next dequeue event
	as a sign that the previous packet is finished. This is wrong
	because of internal device queueing, but on a permanently loaded
	link it is true. Moreover, combined with the clock integrator,
	this scheme looks very close to an ideal solution.  */

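/* Editor's note (illustrative, not from the original sources): the delay
 * bound above is the classic IntServ curve delay_i <= C/R_i + D with
 * reserved rate R_i = r_i*B, C = MTU*r and D = (W*r + k*MTU)/B, since
 * ([MTU/(W*r_i)]*W*r)/B is approximately MTU*r/(r_i*B) = C/R_i.
 */
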
struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est64 rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto __rcu	*filter_list;

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};

struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;		/* Cached timestamp */
	unsigned int		pmask;

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit just now */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};


#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)

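/* Editor's note (descriptive, added for clarity): L2T() above stands for
 * "length to time"; it looks up, in the class's rate table, the time in
 * psched ticks needed to transmit a packet of the given length at the
 * class's configured rate.
 */
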
static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif

/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link sharing rules (e.g. route based) at the root
 * of CBQ, so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */

static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_proto *fl;
	struct tcf_result res;

	/*
	 *  Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		fl = rcu_dereference_bh(head->filter_list);
		/*
		 * Step 2+n. Apply classifier.
		 */
		result = tc_classify(skb, fl, &res, true);
		if (!fl || result < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link sharing class,
		 *	     apply agency specific classifier.
		 *	     Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}

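/* Editor's note (illustrative, not part of the original file): in
 * cbq_classify() above, when a filter at a split node returns a classid
 * whose major number is zero, the minor number is treated as a logical
 * priority and indexes the split node's defaults[] table; e.g. a filter
 * result of 0:2 selects defaults[2], falling back to
 * defaults[TC_PRIO_BESTEFFORT] when that slot is empty.
 */
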
/*
 * A packet has just been enqueued on an empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}

/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during the round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}

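/* Editor's note (descriptive, added for clarity): q->toplevel implements
 * the top-level link-sharing guideline.  cbq_mark_toplevel lowers the
 * cutoff to the level of the first under-limit class on the borrow chain
 * of a class that just received a packet, restricting subsequent
 * borrowing to levels at or below that cutoff.
 */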
static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level) {
		psched_time_t now = psched_get_time();

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}

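/* Editor's note (descriptive, added for clarity): cbq_enqueue classifies
 * the packet, hands it to the chosen class's child qdisc, and on success
 * refreshes the top-level cutoff and puts the class on the active list
 * of its priority band if it was idle.
 */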
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	    struct sk_buff **to_free)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}

	ret = qdisc_enqueue(skb, cl->q, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		qdisc_qstats_drop(sch);
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}

/* Overlimit action: penalize leaf class by adding offtime */
static void cbq_overlimit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * Class goes to sleep, so that it will have no
		 * chance to work off avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug here: apparently they
		 * forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * real available rate, rather than leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}

static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}

static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		time = ktime_set(0, 0);
		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
	}

	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}

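/* Editor's note (descriptive, added for clarity): CBQ runs two timers.
 * q->delay_timer fires cbq_undelay() above to re-activate classes whose
 * priority penalty has expired, while q->watchdog wakes the qdisc when
 * an overlimit class is expected to become under-limit again.
 */
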
/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" the toplevel cutoff if the transmitting class
 * has backlog and is not regulated. It is not part of the
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}

static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;
	psched_time_t now;

	q->tx_class = NULL;
	/* Time integrator. We calculate EOS time
	 * by adding expected packet transmission time.
	 */
	now = q->now + L2T(&q->link, len);

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is total time between packet right edges.
		 * (last_pktlen/rate) is "virtual" busy time, so that
		 *
		 *	idle = (now - last) - last_pktlen/rate
		 */

		idle = now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

		/* true_avgidle := (1-W)*true_avgidle + W*idle,
		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
		 * cl->avgidle == true_avgidle/W,
		 * hence:
		 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate expected time, when this class
			 * will be allowed to send.
			 * It will occur, when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime virtual clock,
			 * necessary to complete transmitted packet.
			 * (len/phys_bandwidth has been already passed
			 * to the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		if ((s64)(now - cl->last) > 0)
			cl->last = now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}

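/* Editor's note (illustrative numeric example, not from the original
 * file): with ewma_log = 5 we have W = 1/32, so the scaled update in
 * cbq_update() above is avgidle += idle - (avgidle >> 5); e.g.
 * avgidle = 320 and idle = 64 yields avgidle = 320 + 64 - 10 = 374.
 */
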
static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* It is a very suspicious place. Now the overlimit
		 * action is generated for not bounded classes
		 * only if the link is completely congested.
		 * Though it agrees with the ancestor-only paradigm,
		 * it looks very stupid. Particularly,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and, however,
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			cbq_overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}

static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0
			 * e.g. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}

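/* Editor's note (descriptive, added for clarity): cbq_dequeue_1 below
 * scans priority bands in order; within a band, cbq_dequeue_prio above
 * runs a deficit-style weighted round-robin where each class may send
 * while its deficit is positive and earns cl->quantum more per round.
 */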
static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);
		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}

static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;

	now = psched_get_time();

	if (q->tx_class)
		cbq_update(q);

	q->now = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * It is possible, if:
		 *
		 * 1. Scheduler is empty.
		 * 2. Toplevel cutoff inhibited borrowing.
		 * 3. Root class is overlimit.
		 *
		 * Reset the 2nd and 3rd conditions and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy: peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for the toplevel algorithm.
		 *
		 * Our version is better but slower, because it requires
		 * two passes; that is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		qdisc_qstats_overlimit(sch);
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}

/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		cl = this->children;
		if (cl) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}

static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}

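/* Editor's note (illustrative worked example, not from the original
 * file): in cbq_normalize_quanta() above,
 * quantum = weight*allot*nclasses/sum_of_weights.  Two classes in one
 * band with allot 1514 and weights 1 and 3 give q->quanta[prio] = 4 and
 * q->nclasses[prio] = 2, so their quanta become 1*1514*2/4 = 757 and
 * 3*1514*2/4 = 2271 respectively.
 */
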
static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct cbq_class *c;

			hlist_for_each_entry(c, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}

static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}

static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}

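/* Editor's note (descriptive, added for clarity): in
 * cbq_link_class()/cbq_unlink_class() above, siblings form a circular
 * singly-linked list through cl->sibling; parent->children points at one
 * member of that ring, and a class with no siblings points back at
 * itself, which cbq_unlink_class relies on.
 */
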
static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}


static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}

static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};

static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1], struct nlattr *opt)
{
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_WRROPT]) {
		const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);

		if (wrr->priority > TC_CBQ_MAXPRIO)
			err = -EINVAL;
	}
	return err;
}

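/* Editor's note (descriptive, added for clarity): cbq_init below insists
 * on both TCA_CBQ_RATE and TCA_CBQ_RTAB being present, then sets up the
 * root "link" class with defaults derived from the device MTU and the
 * configured rate before linking it into the class hash.
 */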
static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	err = cbq_opt_parse(tb, opt);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
		return -EINVAL;

	r = nla_data(tb[TCA_CBQ_RATE]);

	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_rtab;

	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle);
	if (!q->link.q)
		q->link.q = &noop_qdisc;

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	q->delay_timer.function = cbq_undelay;
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}

static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}

static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class *)arg;
	struct nlattr *nest;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->common.classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
	struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

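/* Attach a new leaf qdisc to a class.  A NULL 'new' means "revert to
 * the default": a fresh pfifo keyed by the class' classid is created.
 * qdisc_replace() swaps the leaf under the tree lock and returns the
 * old qdisc to the caller for disposal.
 */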
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			return -ENOBUFS;
	}

	*old = qdisc_replace(sch, new, &cl->q);
	return 0;
}

static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}

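/* Notification that the queue length of the class' leaf qdisc has
 * changed; once the leaf is empty the class is taken off the active
 * (round-robin) list so the scheduler no longer visits it.
 */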
static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
}

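/* Look up a class by classid and take a reference on it; returns the
 * class pointer cast to unsigned long, or 0 when the classid is not
 * known to this qdisc.
 */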
static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}

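/* Free everything a class owns: filter chain, leaf qdisc, rate table
 * and rate estimator.  The root class q->link is embedded in the
 * qdisc's private data and therefore must not be kfree()d.
 */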
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}

static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leaves, which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

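/* Drop a reference taken via cbq_get().  On the final put the class is
 * destroyed; rx_class is cleared first, under the root lock, so the
 * classification state kept by the enqueue path cannot be left pointing
 * at a freed class.
 */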
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}

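/* cbq_change_class() backs both "tc class add" and "tc class change"
 * for this qdisc.  For illustration only (device, handles and rates
 * are made-up values), a setup that ends up here could look like:
 *
 *   tc qdisc add dev eth0 root handle 1: cbq bandwidth 100Mbit avpkt 1000
 *   tc class add dev eth0 parent 1: classid 1:1 cbq bandwidth 100Mbit \
 *	rate 10Mbit allot 1514 prio 5 avpkt 1000 bounded
 *
 * The first branch below (cl != NULL) modifies an existing class; the
 * second allocates a new one, generating a free minor number from
 * q->hgenerator when the user did not pick a classid.
 */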
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	err = cbq_opt_parse(tb, opt);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
		return -EOPNOTSUPP;

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB]);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid))
			goto failure;
	} else {
		int i;
		classid = TC_H_MAKE(sch->handle, 0x8000);

		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!cl->q)
		cl->q = &noop_qdisc;
	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}

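/* Delete a class.  The request is refused while filters or child
 * classes still reference it, and always for the built-in root class.
 * The leaf queue is reset (with the backlog change propagated up the
 * tree) and the class unlinked before the caller's reference is
 * dropped.
 */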
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	unsigned int qlen, backlog;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qlen = cl->q->q.qlen;
	backlog = cl->q->qstats.backlog;
	qdisc_reset(cl->q);
	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	return 0;
}

static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
					     unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return &cl->filter_list;
}

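/* Called when a tc filter is bound to a class.  A filter attached at
 * class 'parent' may only point to classes strictly below it in the
 * hierarchy, hence the level check; on success the per-class filter
 * count pins the class against deletion.
 */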
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}

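/* Walk all classes in the hash table, honouring the skip/count/stop
 * protocol of struct qdisc_walker used by the class dump machinery.
 */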
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops cbq_class_ops = {
	.graft = cbq_graft,
	.leaf = cbq_leaf,
	.qlen_notify = cbq_qlen_notify,
	.get = cbq_get,
	.put = cbq_put,
	.change = cbq_change_class,
	.delete = cbq_delete,
	.walk = cbq_walk,
	.tcf_chain = cbq_find_tcf,
	.bind_tcf = cbq_bind_filter,
	.unbind_tcf = cbq_unbind_filter,
	.dump = cbq_dump_class,
	.dump_stats = cbq_dump_class_stats,
};

static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next = NULL,
	.cl_ops = &cbq_class_ops,
	.id = "cbq",
	.priv_size = sizeof(struct cbq_sched_data),
	.enqueue = cbq_enqueue,
	.dequeue = cbq_dequeue,
	.peek = qdisc_peek_dequeued,
	.init = cbq_init,
	.reset = cbq_reset,
	.destroy = cbq_destroy,
	.change = NULL,
	.dump = cbq_dump,
	.dump_stats = cbq_dump_stats,
	.owner = THIS_MODULE,
};

static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");