/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32-bit hash (provided per packet by rxhash
 * or external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
#define SFB_MAX_PROB	0xFFFF /* Q0.16 encoding of (almost) 1.0 */
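
/*
 * Illustration: a packet whose 32-bit hash is 0xA1B2C3D4 is mapped, low
 * nibble first, to bucket 0x4 at level 0, 0xD at level 1, 0x3 at level 2,
 * then 0xC, 0x2, 0xB, 0x1, and finally 0xA at level 7.
 */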

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of SFB reference: moving hash functions)
 */
struct sfb_bins {
	u32		  perturbation; /* jhash perturbation */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};
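
/*
 * Sizing note: struct sfb_bucket is 4 bytes, so one slot's L x N array is
 * 8 * 16 * 4 = 512 bytes and both double-buffered slots fit in about 1KB.
 */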

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}
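
/*
 * Example of the saturating behaviour: 0x8000 encodes 0.5, so
 * prob_plus(0xC000, 0x8000) returns SFB_MAX_PROB (~1.0) instead of
 * wrapping, and prob_minus(0x2000, 0x8000) returns 0.
 */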

static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

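	/* bins[SFB_LEVELS][SFB_NUMBUCKETS] is one contiguous array, so
	 * advancing b by SFB_NUMBUCKETS entries moves to the next level.
	 */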
	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	q->bins[slot].perturbation = prandom_u32();
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}
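
/*
 * Rehash timeline, using the module defaults as an illustration: bins are
 * rebuilt on a fresh perturbation every 600 seconds; during the final 60
 * seconds of each period packets are accounted in both slots, so the new
 * slot is already warm when sfb_swap_slot() makes it active.
 */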

/* Non-elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with a "penalty_burst" packet burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}
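
/*
 * Example: penalty_rate = 10 and penalty_burst = 20 let a flagged flow
 * send bursts of up to 20 packets but average only 10 packets per second;
 * a true return value tells the caller to drop the packet.
 */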
255
John Fastabend25d8c0d2014-09-12 20:05:27 -0700256static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
Eric Dumazete13e02a2011-02-23 10:56:17 +0000257 int *qerr, u32 *salt)
258{
259 struct tcf_result res;
260 int result;
261
Daniel Borkmann3b3ae882015-08-26 23:00:06 +0200262 result = tc_classify(skb, fl, &res, false);
Eric Dumazete13e02a2011-02-23 10:56:17 +0000263 if (result >= 0) {
264#ifdef CONFIG_NET_CLS_ACT
265 switch (result) {
266 case TC_ACT_STOLEN:
267 case TC_ACT_QUEUED:
268 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
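			/* fall through */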
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

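	/* Blue update rule, applied per level: an empty bin decays its
	 * marking probability by q->decrement (d2), a bin holding at least
	 * bin_size packets raises it by q->increment (d1).  The flow is
	 * judged on the minimum qlen and p_mark over its 8 bins.
	 */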
	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
						       q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
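			/* Illustrative numbers: p_min = 0xC000 (~0.75)
			 * passes the r < p_min test for ~75% of packets;
			 * roughly half of all packets then satisfy
			 * r < (p_min - SFB_MAX_PROB / 2) * 2 and are
			 * dropped, and the next ~25% are ECN marked below.
			 */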
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};
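
/*
 * With SFB_MAX_PROB = 0xFFFF the defaults above evaluate to an increment
 * of 66/65536 (~0.1%) and a decrement of 11/65536 (~0.017%), with a
 * rehash every 10 minutes preceded by a 1 minute double-buffering warmup.
 */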

static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	sch_tree_lock(sch);

	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
				  q->qdisc->qstats.backlog);
	qdisc_destroy(q->qdisc);
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);

	return 0;
}
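
/*
 * Typical user space setup, shown only as an illustration (option names
 * as documented in tc-sfb(8)):
 *
 *	tc qdisc add dev eth0 root sfb limit 1000 max 25 target 20 \
 *		penalty_rate 10 penalty_burst 20
 */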

static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_put(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch,
					     unsigned long cl)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		= sfb_graft,
	.leaf		= sfb_leaf,
	.get		= sfb_get,
	.put		= sfb_put,
	.change		= sfb_change_class,
	.delete		= sfb_delete,
	.walk		= sfb_walk,
	.tcf_chain	= sfb_find_tcf,
	.bind_tcf	= sfb_bind,
	.unbind_tcf	= sfb_put,
	.dump		= sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		= "sfb",
	.priv_size	= sizeof(struct sfb_sched_data),
	.cl_ops		= &sfb_class_ops,
	.enqueue	= sfb_enqueue,
	.dequeue	= sfb_dequeue,
	.peek		= sfb_peek,
	.init		= sfb_init,
	.reset		= sfb_reset,
	.destroy	= sfb_destroy,
	.change		= sfb_change,
	.dump		= sfb_dump,
	.dump_stats	= sfb_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");