/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size; the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P > R and B > M. If P is infinite, this
	double TBF is equivalent to a single one.

	When TBF works in reshaping mode, the latency of a burst of
	length L is estimated as:

	lat = max((L-B)/R, (L-M)/P)


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak-rate TBF is much stricter: with MTU 1500 and HZ=100,
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use alpha with HZ=1000 :-)

	With classful TBF, the limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed, the limit is no longer effective.
*/
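
/* A minimal sketch of the token accounting described above, as a user-space
 * model with tokens kept in nanoseconds the same way tbf_dequeue() below
 * keeps them.  Illustration only: all names (toy_tbf, toy_len_to_ns,
 * toy_tbf_conform) are hypothetical, and the block is under #if 0 so it is
 * never compiled.
 */
#if 0
#include <stdint.h>

struct toy_tbf {
	uint64_t rate_bps;	/* R: rate in bytes per second */
	int64_t buffer_ns;	/* B/R: bucket depth expressed in ns */
	int64_t tokens_ns;	/* N(t) at the last checkpoint */
	int64_t t_c_ns;		/* time of the last checkpoint */
};

/* S/R: the time needed to send 'len' bytes at rate R, in nanoseconds */
static int64_t toy_len_to_ns(const struct toy_tbf *q, unsigned int len)
{
	return (int64_t)(len * 1000000000ULL / q->rate_bps);
}

/* Consume tokens and return 1 if a packet of 'len' bytes conforms at
 * 'now_ns'; return 0 if it must wait (for -toks more nanoseconds).
 */
static int toy_tbf_conform(struct toy_tbf *q, unsigned int len, int64_t now_ns)
{
	int64_t toks = now_ns - q->t_c_ns;	/* tokens earned since t_c */

	if (toks > q->buffer_ns)		/* N(t) saturates at B/R */
		toks = q->buffer_ns;
	toks += q->tokens_ns;
	if (toks > q->buffer_ns)
		toks = q->buffer_ns;
	toks -= toy_len_to_ns(q, len);		/* N(t_*+0) = N(t_*-0) - S/R */
	if (toks < 0)
		return 0;
	q->t_c_ns = now_ns;
	q->tokens_ns = toks;
	return 1;
}
#endif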

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};


/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
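
/* Worked example (assumed numbers, not taken from the code above): with
 * rate_bytes_ps = 125,000,000 (1 Gbit/s) and time_in_ns = 1,000,000 (1 ms),
 * len = 125,000 bytes.  On TC_LINKLAYER_ATM the 48-of-53 cell payload ratio
 * shrinks this to 125,000 / 53 * 48 = 113,184 bytes, and any configured
 * overhead is then subtracted once from the result.
 */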

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
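
/* Note on the accounting above: segmentation replaced one queued skb with
 * nb segments, so qdisc_tree_reduce_backlog() is asked to "reduce" qlen by
 * 1 - nb (i.e. grow it by nb - 1) and bytes by prev_len - len, which is
 * usually negative because each new segment carries its own headers.
 */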

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) &&
		    skb_gso_validate_mac_len(skb, q->max_size))
			return tbf_segment(skb, sch, to_free);
		return qdisc_drop(skb, sch, to_free);
	}
	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which could be sent now. It sounds tempting, but
		   it is wrong in principle: we MUST NOT reorder
		   packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}
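
/* Example of the watchdog arithmetic above (assumed numbers): if the packet
 * left toks = -2,000,000 ns short of conforming, the watchdog is scheduled
 * for now + 2 ms, and dequeue is retried when it fires.
 */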

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST]		= { .type = NLA_U32 },
	[TCA_TBF_PBURST]	= { .type = NLA_U32 },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy, NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB],
					      NULL));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB],
					      NULL));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu!\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u)!\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
					 extack);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}
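
/* Worked example for the buffer computation above (assumed numbers): with
 * rate 1,000,000 bytes/s and TCA_TBF_BURST = 10,000 bytes,
 * buffer = psched_l2t_ns(&rate, 10000) = 10,000,000 ns, i.e. the bucket
 * holds 10 ms worth of tokens.
 */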

static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (!opt)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt, extack);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.find		=	tbf_find,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");
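
/* Example configuration from user space (iproute2 tc; device name assumed):
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 70ms
 *
 * 'rate' and 'burst' feed the rate/buffer handling in tbf_change(), and tc
 * translates 'latency' into the backlog limit passed as qopt->limit.
 */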