/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/* Simple Token Bucket Filter.
   =======================================

   SOURCE.
   -------

   None.

   Description.
   ------------

   A data flow obeys TBF with rate R and depth B, if for any
   time interval t_i...t_f the number of transmitted bits
   does not exceed B + R*(t_f-t_i).

   Packetized version of this definition:
   The sequence of packets of sizes s_i served at moments t_i
   obeys TBF, if for any i<=k:

   s_i+....+s_k <= B + R*(t_k - t_i)

   Algorithm.
   ----------

   Let N(t_i) be B/R initially and N(t) grow continuously with time as:

   N(t+delta) = min{B/R, N(t) + delta}

   If the first packet in queue has length S, it may be
   transmitted only at the time t_* when S/R <= N(t_*),
   and in this case N(t) jumps:

   N(t_* + 0) = N(t_* - 0) - S/R.
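
   (Worked example with illustrative numbers: let R = 125 kbyte/s and
   B = 12500 bytes, so N starts at B/R = 0.1 sec.  A 2500-byte packet
   costs S/R = 0.02 sec, so it may be sent at once, after which N drops
   to 0.08 sec and then refills at one second per second up to the
   0.1 sec cap.)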
   In practice, QoS guarantees require two TBFs to be applied to a data
   stream.  One controls the steady-state burst size, while the other,
   with rate P (the peak rate) and depth M (equal to the link MTU),
   limits bursts on a shorter time scale.

   It is easy to see that P > R and B > M.  If P is infinity, this
   double TBF is equivalent to a single one.

   When TBF works in reshaping mode, latency is estimated as:

   lat = max ((L-B)/R, (L-M)/P)

   where L is the maximal backlog (the configured limit), in bytes.
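
   (Illustrative numbers: with limit L = 10 kbytes, B = 5 kbytes,
   R = 125 kbyte/s and no peak rate, a full backlog drains in
   (L-B)/R = 5000/125000 = 40 ms, the worst-case queueing latency.)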
   NOTES.
   ------

   If TBF throttles, it starts a watchdog timer, which will wake it up
   when it is ready to transmit.
   Note that the minimal timer resolution is 1/HZ.
   If no new packets arrive during this period, or if the device is not
   awakened by an EOI for some previous packet, TBF can stall for up to
   1/HZ.


   This means that with depth B, the maximal rate is

   R_crit = B*HZ

   E.g. for 10 Mbit ethernet and HZ=100 the minimal allowed B is ~10 kbytes.
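
   (Checking that figure with illustrative numbers: a stall of
   1/HZ = 10 ms must be covered by the bucket, and 10 Mbit/s is
   1.25 Mbyte/s, so B >= 1.25 Mbyte/s * 10 ms = 12.5 kbytes,
   on the order of the ~10 kbytes quoted above.)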

   Note that the peak rate TBF is much stricter: with MTU 1500
   P_crit = 150 kbytes/sec.  So, if you need greater peak
   rates, use alpha with HZ=1000 :-)

   With classful TBF, limit is just kept for backwards compatibility.
   It is passed to the default bfifo qdisc - if the inner qdisc is
   changed the limit is not effective anymore.
*/
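
/* Illustrative userspace configuration (values are arbitrary; see
 * tc-tbf(8) for the canonical example this is based on):
 *
 *	tc qdisc add dev eth0 root tbf rate 0.5mbit \
 *		burst 5kb latency 70ms peakrate 1mbit minburst 1540
 *
 * rate/burst feed q->rate/q->buffer below, peakrate/minburst feed
 * q->peak/q->mtu, and tc converts latency into the byte limit that
 * is handed to the inner bfifo.
 */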

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	u32		max_size;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	bool peak_present;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};
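
/* Note on units: tokens, ptokens, buffer and mtu are denominated in
 * nanoseconds of transmission time at the configured rate, not in
 * bytes (t_c is a plain ktime timestamp).  Illustrative numbers: at
 * 1 Mbyte/s, a 10 kbyte burst is stored as buffer = 10 ms =
 * 10,000,000 ns, and psched_l2t_ns() below converts a packet length
 * into the nanoseconds it costs.
 */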

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_reshape_fail(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		if (likely(segs->len <= q->max_size)) {
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			ret = qdisc_enqueue(segs, q->qdisc);
		} else {
			ret = qdisc_reshape_fail(skb, sch);
		}
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				sch->qstats.drops++;
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
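	/* The ancestors counted one skb when this packet was enqueued,
	 * but nb segments now sit in the inner qdisc; propagate the
	 * difference (a negative "decrease") so their qlen stays
	 * consistent.
	 */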
	if (nb > 1)
		qdisc_tree_decrease_qlen(sch, 1 - nb);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

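	/* A packet longer than max_size could never accumulate enough
	 * tokens to be sent in one piece: segment GSO skbs so each
	 * piece fits, and reject anything else. */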
	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb))
			return tbf_segment(skb, sch);
		return qdisc_reshape_fail(skb, sch);
	}
	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_to_ns(ktime_get());
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (q->peak_present) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

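		/* toks | ptoks has the sign bit set iff at least one
		 * counter went negative, so a single test checks both
		 * buckets at once. */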
		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

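		/* Both counters are in nanoseconds and refill in real
		 * time, so -toks (or -ptoks) is exactly the time until
		 * the deficit is gone; sleep for the larger of the two.
		 */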
		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = ktime_to_ns(ktime_get());
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_PTAB + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

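	/* Derive max_size: the rate table stores the transmission time
	 * for each packet size, so scan for the largest size whose
	 * cost still fits within the bucket depth (and within mtu for
	 * the peak bucket).
	 */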
	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	psched_ratecfg_precompute(&q->rate, &rtab->rate);
	if (ptab) {
		psched_ratecfg_precompute(&q->peak, &ptab->rate);
		q->peak_present = true;
	} else {
		q->peak_present = false;
	}

	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_to_ns(ktime_get());
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (q->peak_present)
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

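/* TBF exposes exactly one class, the attached inner qdisc, so the
 * walker is offered a single element. */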
static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");