/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------

	The algorithm skeleton was taken from the NS simulator's cbq.cc.
	Anyone who wants to check this code against the LBL version
	should take into account that ONLY the skeleton was borrowed;
	the implementation is different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than MTU, which is always the case when real-time classes
	have small rates. Note that the statement of [3] is
	incomplete: delay may actually be estimated even if the class
	per-round allotment is less than MTU. Namely, if the per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1, then

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	In the worst case we have an IntServ estimate with D = W*r+k*MTU
	and C = MTU*r. The proof (if correct at all) is trivial.


	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places, which look like wrong translations
	from NS. Anyone is advised to find these differences
	and explain to me why I am wrong 8).

	--- Linux has no EOI event, so we cannot estimate true class
	idle time. The workaround is to consider the next dequeue event
	as a sign that the previous packet is finished. This is wrong because of
	internal device queueing, but on a permanently loaded link it is true.
	Moreover, combined with the clock integrator, this scheme looks
	very close to an ideal solution.  */
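
/* Illustrative userspace setup (not part of the original source; a
 * minimal sketch assuming iproute2's tc-cbq option spellings, which
 * may differ between tc versions):
 *
 *	tc qdisc add dev eth0 root handle 1: cbq \
 *		bandwidth 100Mbit avpkt 1000
 *	tc class add dev eth0 parent 1: classid 1:1 cbq \
 *		bandwidth 100Mbit rate 60Mbit allot 1514 \
 *		avpkt 1000 prio 1 bounded
 *	tc class add dev eth0 parent 1: classid 1:2 cbq \
 *		bandwidth 100Mbit rate 40Mbit allot 1514 \
 *		avpkt 1000 prio 2
 *
 * "bounded" corresponds to TCF_CBQ_LSS_BOUNDED below (cl->borrow ==
 * NULL, so 1:1 may never exceed its 60Mbit rate); 1:2 is left
 * unbounded and may borrow spare bandwidth from the root.
 */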

struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto __rcu	*filter_list;

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};

struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;		/* Cached timestamp */
	unsigned int		pmask;

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit just now */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};


#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)

static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif

/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link sharing rules (e.g. route based) at the root
 * of CBQ, so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */
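
/* Worked example of the steps above (handles invented for
 * illustration): assume sch->handle is 1:0. A packet with
 * skb->priority == 1:3 satisfies TC_H_MAJ(prio ^ sch->handle) == 0,
 * so step 1 returns class 1:3 directly. A packet with zero priority
 * enters the filter loop instead: when a filter returns a classid
 * with a major number, that class is looked up; when it returns a
 * bare minor, it is treated as a logical priority and indexes the
 * split node's defaults[] via res.classid & TC_PRIO_MAX, falling
 * back to defaults[TC_PRIO_BESTEFFORT].
 */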

static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_proto *fl;
	struct tcf_result res;

	/*
	 * Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		fl = rcu_dereference_bh(head->filter_list);
		/*
		 * Step 2+n. Apply classifier.
		 */
		result = tc_classify(skb, fl, &res, true);
		if (!fl || result < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If the classifier selected a link sharing class,
		 * apply the agency specific classifier.
		 * Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}

/*
 * A packet has just been enqueued into an empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}

/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during the round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}

static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level) {
		psched_time_t now = psched_get_time();

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}

static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	    struct sk_buff **to_free)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}

	ret = qdisc_enqueue(skb, cl->q, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		qdisc_qstats_drop(sch);
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}

/* Overlimit action: penalize leaf class by adding offtime */
static void cbq_overlimit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * Class goes to sleep, so that it will have no
		 * chance to work avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug here: apparently they
		 * forgot to shift this term by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * real available rate, rather than leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}
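
/* Numerical sketch of the avgidle compensation above (values
 * invented): with cl->ewma_log == 5, i.e. W == 1/32, and
 * cl->avgidle == -3200, the true (unscaled) average is
 * W*avgidle == -100, so delay is reduced by
 * 3200 - (3200 >> 5) == 3100 == (1/W - 1) * 100,
 * exactly the part of the debt that the scaled average will pay
 * off by itself while the class sleeps.
 */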

static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}

static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		time = 0;
		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
	}

	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}

/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" the toplevel cutoff if the transmitting class
 * has backlog and is not regulated. This is not part of the
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}

static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;
	psched_time_t now;

	q->tx_class = NULL;
	/* Time integrator. We calculate EOS time
	 * by adding expected packet transmission time.
	 */
	now = q->now + L2T(&q->link, len);

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is total time between packet right edges.
		 * (last_pktlen/rate) is "virtual" busy time, so that
		 *
		 * idle = (now - last) - last_pktlen/rate
		 */

		idle = now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

			/* true_avgidle := (1-W)*true_avgidle + W*idle,
			 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
			 * cl->avgidle == true_avgidle/W,
			 * hence:
			 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate expected time, when this class
			 * will be allowed to send.
			 * It will occur, when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to the undertime virtual clock the time
			 * necessary to complete the transmitted packet.
			 * (len/phys_bandwidth has already passed
			 * by the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		if ((s64)(now - cl->last) > 0)
			cl->last = now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
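
/* Worked instance of the scaled EWMA used above (numbers invented):
 * with W == 2^-ewma_log the true update is
 *
 *	true_avgidle' = (1 - W)*true_avgidle + W*idle
 *
 * and dividing through by W gives the stored, scaled form
 *
 *	avgidle' = avgidle + idle - (avgidle >> ewma_log)
 *
 * E.g. ewma_log == 5, avgidle == 640, idle == 20:
 * avgidle' == 640 + 20 - (640 >> 5) == 640, the steady state where
 * the measured idle time exactly matches the class's allocated share.
 */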

static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Now the overlimit
		 * action is generated for non-bounded classes
		 * only if the link is completely congested.
		 * Though it agrees with the ancestor-only paradigm,
		 * it looks very stupid. Particularly,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and, however,
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			cbq_overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}
static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0,
			 * e.g. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from the active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}

static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);
		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}

static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;

	now = psched_get_time();

	if (q->tx_class)
		cbq_update(q);

	q->now = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * This is possible if:
		 *
		 * 1. The scheduler is empty.
		 * 2. The toplevel cutoff inhibited borrowing.
		 * 3. The root class is overlimit.
		 *
		 * Reset the 2nd and 3rd conditions and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy: peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for the toplevel algorithm.
		 *
		 * Our version is better, but slower, because it requires
		 * two passes; this is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		qdisc_qstats_overlimit(sch);
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}

/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		cl = this->children;
		if (cl) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}

static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}
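
/* Illustrative numbers for the normalization above (invented): two
 * classes at the same prio with weights 2 and 1 and allot 1514 give
 * q->quanta[prio] == 3 and q->nclasses[prio] == 2, so
 *
 *	quantum = (weight * allot * nclasses) / quanta
 *
 * yields 2*1514*2/3 == 2018 and 1*1514*2/3 == 1009 bytes per WRR
 * round: the 2:1 weight ratio is preserved while the average quantum
 * per class stays close to one allot.
 */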

static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct cbq_class *c;

			hlist_for_each_entry(c, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}

static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}

static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}

static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}

static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}

static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};

static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
		return -EINVAL;

	r = nla_data(tb[TCA_CBQ_RATE]);

	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_rtab;

	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle);
	if (!q->link.q)
		q->link.q = &noop_qdisc;
	else
		qdisc_hash_add(q->link.q, true);

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	q->delay_timer.function = cbq_undelay;
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}

static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

1285static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
1286{
1287 struct cbq_sched_data *q = qdisc_priv(sch);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001288 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001290 nest = nla_nest_start(skb, TCA_OPTIONS);
1291 if (nest == NULL)
1292 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 if (cbq_dump_attr(skb, &q->link) < 0)
Patrick McHardy1e904742008-01-22 22:11:17 -08001294 goto nla_put_failure;
Yang Yingliangd59b7d82014-03-12 10:20:32 +08001295 return nla_nest_end(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296
Patrick McHardy1e904742008-01-22 22:11:17 -08001297nla_put_failure:
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001298 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299 return -1;
1300}
1301
1302static int
1303cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
1304{
1305 struct cbq_sched_data *q = qdisc_priv(sch);
1306
1307 q->link.xstats.avgidle = q->link.avgidle;
1308 return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
1309}
1310
1311static int
1312cbq_dump_class(struct Qdisc *sch, unsigned long arg,
1313 struct sk_buff *skb, struct tcmsg *tcm)
1314{
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001315 struct cbq_class *cl = (struct cbq_class *)arg;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001316 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317
1318 if (cl->tparent)
Patrick McHardyd77fea22008-07-05 23:22:05 -07001319 tcm->tcm_parent = cl->tparent->common.classid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 else
1321 tcm->tcm_parent = TC_H_ROOT;
Patrick McHardyd77fea22008-07-05 23:22:05 -07001322 tcm->tcm_handle = cl->common.classid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 tcm->tcm_info = cl->q->handle;
1324
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001325 nest = nla_nest_start(skb, TCA_OPTIONS);
1326 if (nest == NULL)
1327 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328 if (cbq_dump_attr(skb, cl) < 0)
Patrick McHardy1e904742008-01-22 22:11:17 -08001329 goto nla_put_failure;
Yang Yingliangd59b7d82014-03-12 10:20:32 +08001330 return nla_nest_end(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331
Patrick McHardy1e904742008-01-22 22:11:17 -08001332nla_put_failure:
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001333 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 return -1;
1335}
1336
static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

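/*
 * Attach a new leaf qdisc to a class.  If none is supplied, a default
 * pfifo is created so the class is never left without a queue; the
 * previous qdisc is handed back to the caller via *old.
 */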
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			return -ENOBUFS;
	}

	*old = qdisc_replace(sch, new, &cl->q);
	return 0;
}

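/* Return the leaf qdisc currently attached to this class. */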
static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}

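/*
 * Called after packets were dropped from the leaf queue behind the
 * scheduler's back; if the queue is now empty, the class must leave
 * the active list so WRR does not keep selecting it.
 */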
static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
}

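/* Look up a class by classid and take a reference on it. */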
static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}

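/*
 * Release everything a class owns: filter chain, leaf qdisc, rate
 * table and rate estimator.  The root "link" class is embedded in the
 * qdisc private area and must not be kfree()d.
 */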
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}

static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leaves, which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

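/*
 * Drop a class reference.  On the final put the class is destroyed;
 * the classifier path may still have cached it in rx_class, so that
 * pointer is cleared under the root qdisc lock first.
 */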
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}

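/*
 * Create a new class or reconfigure an existing one (*arg != 0 selects
 * the existing class).  Parameters arrive as nested attributes under
 * TCA_OPTIONS: TCA_CBQ_RATE/TCA_CBQ_RTAB (rate table), TCA_CBQ_LSSOPT
 * (link sharing), TCA_CBQ_WRROPT (weighted round-robin) and
 * TCA_CBQ_FOPT (defmaps); overlimit strategies and policing are no
 * longer supported and are rejected.  From user space this is driven
 * by tc(8), e.g. (illustrative only, see tc-cbq(8) for exact syntax):
 *
 *	tc class add dev eth0 parent 1: classid 1:10 cbq \
 *		rate 1mbit allot 1514 prio 5 avpkt 1000
 */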
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
		return -EOPNOTSUPP;

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB]);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid))
			goto failure;
	} else {
		int i;
		classid = TC_H_MAKE(sch->handle, 0x8000);

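		/*
		 * No classid supplied: probe minors 0x8001..0xFFFF via the
		 * rolling hgenerator until a free one is found; after 0x8000
		 * unsuccessful probes, give up with -ENOSR.
		 */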
		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!cl->q)
		cl->q = &noop_qdisc;
	else
		qdisc_hash_add(cl->q, true);

	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}

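/*
 * Delete a class.  Refused while filters or child classes still
 * reference it (and for the root class).  The leaf queue is flushed
 * with the ancestors' qlen/backlog adjusted accordingly, all scheduler
 * state pointing at the class is cleared, and the final reference is
 * dropped later by cops->put().
 */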
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	unsigned int qlen, backlog;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qlen = cl->q->q.qlen;
	backlog = cl->q->qstats.backlog;
	qdisc_reset(cl->q);
	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	return 0;
}

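/*
 * Return the anchor of a class's filter list; with no class given,
 * filters attach to the root "link" class.
 */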
static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
					     unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return &cl->filter_list;
}

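/*
 * Bind a filter result to a class.  The target must sit strictly below
 * the class the filter is attached to (smaller level), otherwise the
 * binding is refused.  Bindings are counted so cbq_delete() can refuse
 * to remove a class that filters still point at.
 */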
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}

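/*
 * Walk all classes in the hash for the netlink dump machinery,
 * honouring the walker's skip/count/stop bookkeeping.
 */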
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

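/* Class-level operations exported to the packet scheduler core. */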
static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_chain	=	cbq_find_tcf,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};

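/*
 * Qdisc-level operations.  .peek uses the generic helper, which
 * dequeues a packet and parks it until the following real dequeue.
 */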
static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};

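/* Register the CBQ qdisc with the scheduler core on module load. */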
static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");