/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	In practice, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size; the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P>R and B>M. If P is infinite, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)
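
	For example (illustrative numbers): with limit L = 60 Kbytes,
	rate R = 125 Kbytes/sec (1 Mbit/sec), depth B = 10 Kbytes,
	peak P = 250 Kbytes/sec and M = 1.5 Kbytes:

	lat = max((60-10)/125, (60-1.5)/250) = max(0.4, 0.234) = 0.4 sec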


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not woken up by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much stricter: with MTU 1500 and HZ=100,
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed, the limit is no longer effective.
*/
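
/* A typical setup matching the example above (illustrative values):
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 400ms
 *
 * tc converts 'latency' into the byte limit carried in tc_tbf_qopt.limit.
 */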

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};


/* Time to Length: convert a time interval in ns to a length in bytes,
 * to determine how many bytes can be sent in the given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

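	/* ATM framing: each 53-byte cell carries only 48 bytes of payload */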
	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
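
/* Example (illustrative numbers): at rate_bytes_ps = 125000 (1 Mbit/s),
 * time_in_ns = 1000000 (1 ms) yields len = 125 bytes, before the ATM
 * and overhead adjustments above.
 */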

/* The GSO packet is too big; segment it so that tbf can transmit
 * each segment in time.
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
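	/* One skb of prev_len bytes became nb segments totalling len bytes;
	 * let the ancestor qdiscs account for the difference.
	 */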
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

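	/* An oversized packet can never collect enough tokens: segment a
	 * GSO skb whose segments would fit, drop anything else.
	 */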
	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch, to_free);
		return qdisc_drop(skb, sch, to_free);
	}
	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
206
Hiroaki SHIMODAa135e592014-03-02 17:30:26 +0900207static bool tbf_peak_present(const struct tbf_sched_data *q)
208{
209 return q->peak.rate_bytes_ps;
210}
211
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

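		/* (toks | ptoks) >= 0 iff both are non-negative: the OR of
		 * two s64 values has the sign bit set iff either one does.
		 */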
		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

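		/* toks and/or ptoks is negative here: its absolute value is
		 * the time in ns until the head packet conforms, so sleep
		 * for the larger deficit.
		 */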
		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST]	= { .type = NLA_U32 },
	[TCA_TBF_PBURST]	= { .type = NLA_U32 },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy, NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
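	/* Legacy iproute2 leaves qopt->rate.linklayer unaware; as a side
	 * effect, qdisc_get_rtab() detects the link layer from the rate
	 * table. The table itself is unused, so drop it right away.
	 */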
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB],
					      NULL));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB],
					      NULL));

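	/* buffer and mtu arrive in psched ticks; convert to ns, capped at
	 * 2^32 - 1 ns to stay within the legacy 32-bit range.
	 */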
	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu!\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u)!\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
					 extack);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
		if (child != &noop_qdisc)
			qdisc_hash_add(child, true);
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (!opt)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt, extack);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

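/* TBF presents exactly one pseudo-class (minor 1) holding the inner qdisc */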
static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.find		=	tbf_find,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");