/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------

	The algorithm skeleton was taken from the NS simulator's cbq.cc.
	Anyone checking this code against the LBL version should take into
	account that ONLY the skeleton was borrowed; the implementation
	is different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than the MTU, which is always the case when real-time classes
	have small rates. Note that the statement in [3] is
	incomplete: delay may actually be estimated even if the class's
	per-round allotment is less than the MTU. Namely, if the per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1, then

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	In the worst case we have an IntServ estimate with D = W*r+k*MTU
	and C = MTU*r. The proof (if correct at all) is trivial.


	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places, which look like wrong translations
	from NS. Anyone is advised to find these differences
	and explain to me why I am wrong 8).

	--- Linux has no EOI event, so we cannot estimate true class
	idle time. The workaround is to treat the next dequeue event
	as a sign that the previous packet has finished. This is wrong
	because of internal device queueing, but on a permanently loaded
	link it is true. Moreover, combined with the clock integrator,
	this scheme looks very close to an ideal solution. */
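
/* Worked instance of the delay bound above (illustrative numbers, not
 * taken from the references): take k = 2 classes with r_1 = r_2 = 0.25
 * (so r = 0.5), MTU = 1500 bytes, and W chosen so that the per-round
 * allotment is W*r_i = 750 bytes (i.e. W = 3000, W*r = 1500). Then
 *
 *	delay_i <= (ceil(1500/750)*1500 + 1500 + 2*1500)/B = 7500/B
 *
 * which is 60 ms on a 1 Mbit/s (125000 bytes/s) link. The point is that
 * the bound stays finite even though the allotment (750) is below the MTU.
 */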

struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};

struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;		/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;		/* Cached timestamp */
	unsigned int		pmask;

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit just now */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};


#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)
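
/* L2T ("length to time") maps a packet length to the time the packet
 * occupies the class's configured rate, by indexing the qdisc rate
 * table attached to the class. Rough illustration (numbers assumed,
 * not from the source): at a class rate of 1 Mbit/s, i.e. 125000
 * bytes/s, a 1000-byte packet maps to 1000/125000 = 8 ms of
 * transmission time, returned in psched ticks.
 */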

static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif

/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
 * so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */

static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_proto *fl;
	struct tcf_result res;

	/*
	 *  Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		fl = rcu_dereference_bh(head->filter_list);
		/*
		 * Step 2+n. Apply classifier.
		 */
		result = tcf_classify(skb, fl, &res, true);
		if (!fl || result < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through: the packet is gone either way */
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If the classifier selected a link sharing class,
		 * apply the agency-specific classifier.
		 * Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}
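
/* Illustration of the defmap fallback above (hypothetical setup, not
 * from the source): suppose a filter on a split node returns a classid
 * with a zero major number, e.g. res.classid == TC_PRIO_INTERACTIVE (6).
 * It is then treated as a logical priority and the packet goes to
 * defmap[6], i.e. to whichever child of the split node claimed bit 6
 * in its defmap (configured via TCA_CBQ_FOPT / cbq_change_defmap
 * below); if no class claimed that bit, defmap[TC_PRIO_BESTEFFORT]
 * is used instead.
 */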

/*
 * A packet has just been enqueued on an empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}

/*
 * Unlink the class from the active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during the round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}
345
346static void
347cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
348{
349 int toplevel = q->toplevel;
350
Eric Dumazetcca605d2016-06-10 16:41:37 -0700351 if (toplevel > cl->level) {
Vasily Averin7201c1d2014-08-14 12:27:59 +0400352 psched_time_t now = psched_get_time();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353
354 do {
Patrick McHardy104e0872007-03-23 11:28:07 -0700355 if (cl->undertime < now) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356 q->toplevel = cl->level;
357 return;
358 }
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000359 } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360 }
361}

static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	    struct sk_buff **to_free)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}

	ret = qdisc_enqueue(skb, cl->q, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		qdisc_qstats_drop(sch);
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}
397
Florian Westphalc3498d32016-06-09 00:27:39 +0200398/* Overlimit action: penalize leaf class by adding offtime */
399static void cbq_overlimit(struct cbq_class *cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400{
401 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
Patrick McHardy8edc0c32007-03-23 11:28:55 -0700402 psched_tdiff_t delay = cl->undertime - q->now;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403
404 if (!cl->delayed) {
405 delay += cl->offtime;
406
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +0900407 /*
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000408 * Class goes to sleep, so that it will have no
409 * chance to work avgidle. Let's forgive it 8)
410 *
411 * BTW cbq-2.0 has a crap in this
412 * place, apparently they forgot to shift it by cl->ewma_log.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413 */
414 if (cl->avgidle < 0)
415 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
416 if (cl->avgidle < cl->minidle)
417 cl->avgidle = cl->minidle;
418 if (delay <= 0)
419 delay = 1;
Patrick McHardy7c59e252007-03-23 11:27:45 -0700420 cl->undertime = q->now + delay;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421
422 cl->xstats.overactions++;
423 cl->delayed = 1;
424 }
425 if (q->wd_expires == 0 || q->wd_expires > delay)
426 q->wd_expires = delay;
427
428 /* Dirty work! We must schedule wakeups based on
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000429 * real available rate, rather than leaf rate,
430 * which may be tiny (even zero).
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431 */
432 if (q->toplevel == TC_CBQ_MAXLEVEL) {
433 struct cbq_class *b;
434 psched_tdiff_t base_delay = q->wd_expires;
435
436 for (b = cl->borrow; b; b = b->borrow) {
Patrick McHardy8edc0c32007-03-23 11:28:55 -0700437 delay = b->undertime - q->now;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700438 if (delay < base_delay) {
439 if (delay <= 0)
440 delay = 1;
441 base_delay = delay;
442 }
443 }
444
445 q->wd_expires = base_delay;
446 }
447}
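
/* Numeric sketch of the penalty above (illustrative values, not from
 * the source): with ewma_log = 5 and avgidle = -3200 ticks, the
 * "forgiveness" term is 3200 - (3200 >> 5) = 3200 - 100 = 3100 ticks,
 * subtracted from delay = (undertime - now) + offtime before the
 * class is put to sleep until q->now + delay.
 */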

static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}

static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		time = 0;
		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
	}

	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}

/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" the toplevel cutoff if the transmitting class
 * has backlog and is not regulated. This is not part of the
 * original CBQ description, but it looks more reasonable.
 * Probably it is wrong; this question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}

static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;
	psched_time_t now;

	q->tx_class = NULL;
	/* Time integrator. We calculate EOS time
	 * by adding expected packet transmission time.
	 */
	now = q->now + L2T(&q->link, len);

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is total time between packet right edges.
		 * (last_pktlen/rate) is "virtual" busy time, so that
		 *
		 * idle = (now - last) - last_pktlen/rate
		 */

		idle = now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

		/* true_avgidle := (1-W)*true_avgidle + W*idle,
		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
		 * cl->avgidle == true_avgidle/W,
		 * hence:
		 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate expected time, when this class
			 * will be allowed to send.
			 * It will occur, when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime virtual clock,
			 * necessary to complete transmitted packet.
			 * (len/phys_bandwidth has been already passed
			 * to the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		if ((s64)(now - cl->last) > 0)
			cl->last = now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
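
/* Worked example of the scaled EWMA above (illustrative numbers, not
 * from the source): with ewma_log = 5, W = 1/32 and cl->avgidle
 * storing true_avgidle/W. For avgidle = 640 and a fully busy
 * interval (idle = 0), the update avgidle += 0 - (640 >> 5) yields
 * 620, i.e. true_avgidle decays by exactly the factor (1 - W), as
 * the unscaled formula requires.
 */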

static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Now the overlimit
		 * action is generated for non-bounded classes
		 * only if the link is completely congested.
		 * Though this agrees with the ancestor-only paradigm,
		 * it looks very stupid. Particularly,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and, however,
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			cbq_overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}

static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0
			 * f.e. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from the active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}

static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);
		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}

static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;

	now = psched_get_time();

	if (q->tx_class)
		cbq_update(q);

	q->now = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * It is possible, if:
		 *
		 * 1. Scheduler is empty.
		 * 2. Toplevel cutoff inhibited borrowing.
		 * 3. Root class is overlimit.
		 *
		 * Reset the 2nd and 3rd conditions and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy; peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for the toplevel algorithm.
		 *
		 * Our version is better but slower, because it requires
		 * two passes; this is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		qdisc_qstats_overlimit(sch);
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}

/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		cl = this->children;
		if (cl) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}

static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}
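
/* Example of the normalization above (illustrative numbers): two
 * classes at the same prio, weights 1 and 3, both with allot = 1500,
 * give q->quanta[prio] = 4 and q->nclasses[prio] = 2. Then
 * quantum_1 = 1*1500*2/4 = 750 and quantum_2 = 3*1500*2/4 = 2250,
 * so the mean quantum per class equals the allot while per-class
 * quanta stay proportional to the weights.
 */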

static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct cbq_class *c;

			hlist_for_each_entry(c, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}

static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}

static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}

static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}


static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}

static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};

static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
		return -EINVAL;

	r = nla_data(tb[TCA_CBQ_RATE]);

	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_rtab;

	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle);
	if (!q->link.q)
		q->link.q = &noop_qdisc;
	else
		qdisc_hash_add(q->link.q, true);

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	q->delay_timer.function = cbq_undelay;
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}

static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}

static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class *)arg;
	struct nlattr *nest;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->common.classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
1360
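/*
 * Graft a new leaf qdisc onto a class, replacing the old one; a NULL
 * 'new' falls back to a freshly created default pfifo. Reached from
 * userspace via, e.g. (illustrative):
 *	tc qdisc replace dev eth0 parent 1:10 sfq
 */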
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			return -ENOBUFS;
	}

	*old = qdisc_replace(sch, new, &cl->q);
	return 0;
}

static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}

static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
}

static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}

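/*
 * Final teardown of a single class: drop its filter block, leaf qdisc,
 * rate table and estimator. Called from cbq_put() when the refcount
 * hits zero, and from cbq_destroy() for every class that remains.
 */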
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_block_put(cl->block);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}

static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leaves, which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
			tcf_block_put(cl->block);
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}

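/*
 * Create a new class or change an existing one. A typical create from
 * userspace looks like (illustrative iproute2 syntax):
 *	tc class add dev eth0 parent 1: classid 1:10 cbq bandwidth 100Mbit \
 *		rate 10Mbit allot 1514 prio 5 avpkt 1000 bounded
 * RATE, WRROPT and LSSOPT are mandatory when creating; OVL_STRATEGY and
 * POLICE are rejected with -EOPNOTSUPP.
 */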
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
		return -EOPNOTSUPP;

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB]);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid))
			goto failure;
	} else {
		int i;
		classid = TC_H_MAKE(sch->handle, 0x8000);

		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	err = tcf_block_get(&cl->block, &cl->filter_list);
	if (err) {
		kfree(cl);
		return err;
	}

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err) {
			tcf_block_put(cl->block);
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!cl->q)
		cl->q = &noop_qdisc;
	else
		qdisc_hash_add(cl->q, true);

	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}

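/*
 * Delete a class. Refused (-EBUSY) while filters or child classes still
 * reference it, or for the root link class. E.g. (illustrative):
 *	tc class del dev eth0 classid 1:10
 */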
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	unsigned int qlen, backlog;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qlen = cl->q->q.qlen;
	backlog = cl->q->qstats.backlog;
	qdisc_reset(cl->q);
	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	return 0;
}

static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return cl->block;
}

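/*
 * Bind a tc filter to a class, bumping cl->filters so cbq_delete() can
 * refuse to remove a class that filters still point at. A filter may
 * only select a class lying strictly below its attachment point in the
 * hierarchy; otherwise the bind is rejected.
 */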
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}

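/*
 * Walk every class in the hash, honouring the skip/count cursor; this
 * backs class enumeration such as 'tc class show dev eth0'.
 */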
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		= cbq_graft,
	.leaf		= cbq_leaf,
	.qlen_notify	= cbq_qlen_notify,
	.get		= cbq_get,
	.put		= cbq_put,
	.change		= cbq_change_class,
	.delete		= cbq_delete,
	.walk		= cbq_walk,
	.tcf_block	= cbq_tcf_block,
	.bind_tcf	= cbq_bind_filter,
	.unbind_tcf	= cbq_unbind_filter,
	.dump		= cbq_dump_class,
	.dump_stats	= cbq_dump_class_stats,
};

static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &cbq_class_ops,
	.id		= "cbq",
	.priv_size	= sizeof(struct cbq_sched_data),
	.enqueue	= cbq_enqueue,
	.dequeue	= cbq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= cbq_init,
	.reset		= cbq_reset,
	.destroy	= cbq_destroy,
	.change		= NULL,
	.dump		= cbq_dump,
	.dump_stats	= cbq_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");