/*
 * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
 *
 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
 * Copyright (c) 2012 Paolo Valente.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>


/*  Quick Fair Queueing Plus
    ========================

    Sources:

    [1] Paolo Valente,
    "Reducing the Execution Time of Fair-Queueing Schedulers."
    http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf

    Sources for QFQ:

    [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
    Packet Scheduling with Tight Bandwidth Distribution Guarantees."

    See also:
    http://retis.sssup.it/~fabio/linux/qfq/
 */

/*

  QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
  classes. Each aggregate is timestamped with a virtual start time S
  and a virtual finish time F, and scheduled according to its
  timestamps. S and F are computed as a function of a system virtual
  time function V. The classes within each aggregate are instead
  scheduled with DRR.

  To speed up operations, QFQ+ also divides aggregates into a limited
  number of groups. Which group a class belongs to depends on the
  ratio between the maximum packet length for the class and the weight
  of the class. Groups have their own S and F. In the end, QFQ+
  schedules groups, then aggregates within groups, then classes within
  aggregates. See [1] and [2] for a full description.

  Virtual time computations.

  S, F and V are all computed in fixed point arithmetic with
  FRAC_BITS decimal bits.

  QFQ_MAX_INDEX is the maximum index allowed for a group. We need
  one bit per index.
  QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.

  The layout of the bits is as below:

                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
                                 ^.__grp->index = 0
                                 *.__grp->slot_shift

  where MIN_SLOT_SHIFT is derived by difference from the others.

  The max group index corresponds to Lmax/w_min, where
  Lmax = 1<<MTU_SHIFT and w_min = 1.
  From this, and knowing how many groups (MAX_INDEX) we want,
  we can derive the shift corresponding to each group.

  Because we often need to compute
        F = S + len/w_i  and  V = V + len/wsum
  instead of storing w_i we store the value
        inv_w = (1<<FRAC_BITS)/w_i
  (and, likewise, the inverse of the weight sum, iwsum), so that both
  updates become multiplications:
        F = S + len * inv_w  and  V = V + len * iwsum.
  Keeping the weight sum and its inverse in their own fields (wsum,
  iwsum) makes it easy to move between a static and an adaptive
  weight sum.

  The per-scheduler-instance data contain all the data structures
  for the scheduler: bitmaps and bucket lists.

 */
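
/*
 * Worked example (illustrative only; the numbers below are assumptions
 * picked to make the fixed-point arithmetic visible, not values taken
 * from a real run). With FRAC_BITS = 30:
 *
 *      w_i   = 4                     (class weight)
 *      inv_w = ONE_FP / w_i          = (1UL << 30) / 4 = 1 << 28
 *      len   = 1500                  (bytes)
 *      F     = S + len * inv_w       = S + 1500 * (1 << 28)
 *            = S + 375 << 30,        i.e. len/w_i = 375 in fixed point
 *
 * V is advanced the same way with the inverse of the weight sum:
 *      V    += len * iwsum,  where iwsum = ONE_FP / wsum.
 */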

/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group.
 */
#define QFQ_MAX_SLOTS  32

/*
 * Shifts used for aggregate<->group mapping. We allow class weights that are
 * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
 * group with the smallest index that can support the L_i / r_i configured
 * for the classes in the aggregate.
 *
 * grp->index is the index of the group; and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 */
#define QFQ_MAX_INDEX           24
#define QFQ_MAX_WSHIFT          10

#define QFQ_MAX_WEIGHT          (1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
#define QFQ_MAX_WSUM            (64*QFQ_MAX_WEIGHT)

#define FRAC_BITS               30      /* fixed point arithmetic */
#define ONE_FP                  (1UL << FRAC_BITS)

#define QFQ_MTU_SHIFT           16      /* to support TSO/GSO */
#define QFQ_MIN_LMAX            512     /* see qfq_slot_insert */

#define QFQ_MAX_AGG_CLASSES     8 /* max num classes per aggregate allowed */

/*
 * Possible group states. These values are used as indexes for the bitmaps
 * array of struct qfq_sched.
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };

struct qfq_group;

struct qfq_aggregate;

struct qfq_class {
        struct Qdisc_class_common common;

        unsigned int refcnt;
        unsigned int filter_cnt;

        struct gnet_stats_basic_packed bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est64 rate_est;
        struct Qdisc *qdisc;
        struct list_head alist;         /* Link for active-classes list. */
        struct qfq_aggregate *agg;      /* Parent aggregate. */
        int deficit;                    /* DRR deficit counter. */
};

struct qfq_aggregate {
        struct hlist_node next; /* Link for the slot list. */
        u64 S, F;               /* flow timestamps (exact) */

        /* group we belong to. In principle we would need the index,
         * which is log_2(lmax/weight), but we never reference it
         * directly, only the group.
         */
        struct qfq_group *grp;

        /* these are copied from the flowset. */
        u32 class_weight; /* Weight of each class in this aggregate. */
        /* Max pkt size for the classes in this aggregate, DRR quantum. */
        int lmax;

        u32 inv_w;                  /* ONE_FP/(sum of weights of classes in aggr.). */
        u32 budgetmax;              /* Max budget for this aggregate. */
        u32 initial_budget, budget; /* Initial and current budget. */

        int num_classes;            /* Number of classes in this aggr. */
        struct list_head active;    /* DRR queue of active classes. */

        struct hlist_node nonfull_next; /* See nonfull_aggs in qfq_sched. */
};

struct qfq_group {
        u64 S, F;                       /* group timestamps (approx). */
        unsigned int slot_shift;        /* Slot shift. */
        unsigned int index;             /* Group index. */
        unsigned int front;             /* Index of the front slot. */
        unsigned long full_slots;       /* non-empty slots */

        /* Array of RR lists of active aggregates. */
        struct hlist_head slots[QFQ_MAX_SLOTS];
};

struct qfq_sched {
        struct tcf_proto __rcu *filter_list;
        struct Qdisc_class_hash clhash;

        u64 oldV, V;                    /* Precise virtual times. */
        struct qfq_aggregate *in_serv_agg; /* Aggregate being served. */
        u32 wsum;                       /* weight sum */
        u32 iwsum;                      /* inverse weight sum */

        unsigned long bitmaps[QFQ_MAX_STATE];       /* Group bitmaps. */
        struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
        u32 min_slot_shift;     /* Index of the group-0 bit in the bitmaps. */

        u32 max_agg_classes;            /* Max number of classes per aggr. */
        struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
};

/*
 * Possible reasons why the timestamps of an aggregate are updated
 * enqueue: the aggregate switches from idle to active and must be
 *          scheduled for service
 * requeue: the aggregate finishes its budget, so it stops being served
 *          and must be rescheduled for service
 */
enum update_reason {enqueue, requeue};

static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct Qdisc_class_common *clc;

        clc = qdisc_class_find(&q->clhash, classid);
        if (clc == NULL)
                return NULL;
        return container_of(clc, struct qfq_class, common);
}

static void qfq_purge_queue(struct qfq_class *cl)
{
        unsigned int len = cl->qdisc->q.qlen;

        qdisc_reset(cl->qdisc);
        qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
        [TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
        [TCA_QFQ_LMAX] = { .type = NLA_U32 },
};

/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
 */
static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
{
        u64 slot_size = (u64)maxlen * inv_w;
        unsigned long size_map;
        int index = 0;

        size_map = slot_size >> min_slot_shift;
        if (!size_map)
                goto out;

        index = __fls(size_map) + 1;    /* basically a log_2 */
        index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));

        if (index < 0)
                index = 0;
out:
        pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
                 (unsigned long) ONE_FP/inv_w, maxlen, index);

        return index;
}
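
/*
 * Worked example for qfq_calc_index() (illustrative; assumes the
 * hypothetical value min_slot_shift = 25, which is what qfq_init_qdisc()
 * computes when q->max_agg_classes ends up as QFQ_MAX_AGG_CLASSES = 8):
 *
 *      weight = 1   ->  inv_w = ONE_FP = 1 << 30
 *      maxlen = 512 = QFQ_MIN_LMAX
 *
 *      slot_size = 512 * (1 << 30)     = 1 << 39
 *      size_map  = slot_size >> 25     = 1 << 14
 *      index     = __fls(size_map) + 1 = 15
 *
 * slot_size - (1ULL << (15 + 25 - 1)) == 0, so index is decremented
 * to 14: slot sizes that are exact powers of two fall in the lower group.
 */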

static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
                             enum update_reason);

static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
                         u32 lmax, u32 weight)
{
        INIT_LIST_HEAD(&agg->active);
        hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

        agg->lmax = lmax;
        agg->class_weight = weight;
}

static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
                                          u32 lmax, u32 weight)
{
        struct qfq_aggregate *agg;

        hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
                if (agg->lmax == lmax && agg->class_weight == weight)
                        return agg;

        return NULL;
}


/* Update aggregate as a function of the new number of classes. */
static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
                           int new_num_classes)
{
        u32 new_agg_weight;

        if (new_num_classes == q->max_agg_classes)
                hlist_del_init(&agg->nonfull_next);

        if (agg->num_classes > new_num_classes &&
            new_num_classes == q->max_agg_classes - 1) /* agg no more full */
                hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

        /* The next assignment may let
         * agg->initial_budget > agg->budgetmax
         * hold; we take this into account in charge_actual_service().
         */
        agg->budgetmax = new_num_classes * agg->lmax;
        new_agg_weight = agg->class_weight * new_num_classes;
        agg->inv_w = ONE_FP/new_agg_weight;

        if (agg->grp == NULL) {
                int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
                                       q->min_slot_shift);
                agg->grp = &q->groups[i];
        }

        q->wsum +=
                (int) agg->class_weight * (new_num_classes - agg->num_classes);
        q->iwsum = ONE_FP / q->wsum;

        agg->num_classes = new_num_classes;
}

/* Add class to aggregate. */
static void qfq_add_to_agg(struct qfq_sched *q,
                           struct qfq_aggregate *agg,
                           struct qfq_class *cl)
{
        cl->agg = agg;

        qfq_update_agg(q, agg, agg->num_classes+1);
        if (cl->qdisc->q.qlen > 0) { /* adding an active class */
                list_add_tail(&cl->alist, &agg->active);
                if (list_first_entry(&agg->active, struct qfq_class, alist) ==
                    cl && q->in_serv_agg != agg) /* agg was inactive */
                        qfq_activate_agg(q, agg, enqueue); /* schedule agg */
        }
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);

static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
        hlist_del_init(&agg->nonfull_next);
        q->wsum -= agg->class_weight;
        if (q->wsum != 0)
                q->iwsum = ONE_FP / q->wsum;

        if (q->in_serv_agg == agg)
                q->in_serv_agg = qfq_choose_next_agg(q);
        kfree(agg);
}

/* Deschedule class from within its parent aggregate. */
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
        struct qfq_aggregate *agg = cl->agg;

        list_del(&cl->alist); /* remove from RR queue of the aggregate */
        if (list_empty(&agg->active)) /* agg is now inactive */
                qfq_deactivate_agg(q, agg);
}

/* Remove class from its parent aggregate. */
static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
        struct qfq_aggregate *agg = cl->agg;

        cl->agg = NULL;
        if (agg->num_classes == 1) { /* agg being emptied, destroy it */
                qfq_destroy_agg(q, agg);
                return;
        }
        qfq_update_agg(q, agg, agg->num_classes-1);
}

/* Deschedule class and remove it from its parent aggregate. */
static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
        if (cl->qdisc->q.qlen > 0) /* class is active */
                qfq_deactivate_class(q, cl);

        qfq_rm_from_agg(q, cl);
}

/* Move class to a new aggregate, matching the new class weight and/or lmax */
static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
                          u32 lmax)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight);

        if (new_agg == NULL) { /* create new aggregate */
                new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
                if (new_agg == NULL)
                        return -ENOBUFS;
                qfq_init_agg(q, new_agg, lmax, weight);
        }
        qfq_deact_rm_from_agg(q, cl);
        qfq_add_to_agg(q, new_agg, cl);

        return 0;
}

static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                            struct nlattr **tca, unsigned long *arg)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl = (struct qfq_class *)*arg;
        bool existing = false;
        struct nlattr *tb[TCA_QFQ_MAX + 1];
        struct qfq_aggregate *new_agg = NULL;
        u32 weight, lmax, inv_w;
        int err;
        int delta_w;

        if (tca[TCA_OPTIONS] == NULL) {
                pr_notice("qfq: no options\n");
                return -EINVAL;
        }

        err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy);
        if (err < 0)
                return err;

        if (tb[TCA_QFQ_WEIGHT]) {
                weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
                if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
                        pr_notice("qfq: invalid weight %u\n", weight);
                        return -EINVAL;
                }
        } else
                weight = 1;

        if (tb[TCA_QFQ_LMAX]) {
                lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
                if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
                        pr_notice("qfq: invalid max length %u\n", lmax);
                        return -EINVAL;
                }
        } else
                lmax = psched_mtu(qdisc_dev(sch));

        inv_w = ONE_FP / weight;
        weight = ONE_FP / inv_w;

        if (cl != NULL &&
            lmax == cl->agg->lmax &&
            weight == cl->agg->class_weight)
                return 0; /* nothing to change */

        delta_w = weight - (cl ? cl->agg->class_weight : 0);

        if (q->wsum + delta_w > QFQ_MAX_WSUM) {
                pr_notice("qfq: total weight out of range (%d + %u)\n",
                          delta_w, q->wsum);
                return -EINVAL;
        }

        if (cl != NULL) { /* modify existing class */
                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, NULL,
                                                    &cl->rate_est,
                                                    qdisc_root_sleeping_lock(sch),
                                                    tca[TCA_RATE]);
                        if (err)
                                return err;
                }
                existing = true;
                goto set_change_agg;
        }

        /* create and init new class */
        cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
        if (cl == NULL)
                return -ENOBUFS;

        cl->refcnt = 1;
        cl->common.classid = classid;
        cl->deficit = lmax;

        cl->qdisc = qdisc_create_dflt(sch->dev_queue,
                                      &pfifo_qdisc_ops, classid);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;

        if (tca[TCA_RATE]) {
                err = gen_new_estimator(&cl->bstats, NULL,
                                        &cl->rate_est,
                                        qdisc_root_sleeping_lock(sch),
                                        tca[TCA_RATE]);
                if (err)
                        goto destroy_class;
        }

        sch_tree_lock(sch);
        qdisc_class_hash_insert(&q->clhash, &cl->common);
        sch_tree_unlock(sch);

        qdisc_class_hash_grow(sch, &q->clhash);

set_change_agg:
        sch_tree_lock(sch);
        new_agg = qfq_find_agg(q, lmax, weight);
        if (new_agg == NULL) { /* create new aggregate */
                sch_tree_unlock(sch);
                new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
                if (new_agg == NULL) {
                        err = -ENOBUFS;
                        gen_kill_estimator(&cl->bstats, &cl->rate_est);
                        goto destroy_class;
                }
                sch_tree_lock(sch);
                qfq_init_agg(q, new_agg, lmax, weight);
        }
        if (existing)
                qfq_deact_rm_from_agg(q, cl);
        qfq_add_to_agg(q, new_agg, cl);
        sch_tree_unlock(sch);

        *arg = (unsigned long)cl;
        return 0;

destroy_class:
        qdisc_destroy(cl->qdisc);
        kfree(cl);
        return err;
}

static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
        struct qfq_sched *q = qdisc_priv(sch);

        qfq_rm_from_agg(q, cl);
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
        qdisc_destroy(cl->qdisc);
        kfree(cl);
}

static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl = (struct qfq_class *)arg;

        if (cl->filter_cnt > 0)
                return -EBUSY;

        sch_tree_lock(sch);

        qfq_purge_queue(cl);
        qdisc_class_hash_remove(&q->clhash, &cl->common);

        BUG_ON(--cl->refcnt == 0);
        /*
         * This shouldn't happen: we "hold" one cops->get() when called
         * from tc_ctl_tclass; the destroy method is done from cops->put().
         */

        sch_tree_unlock(sch);
        return 0;
}

static unsigned long qfq_get_class(struct Qdisc *sch, u32 classid)
{
        struct qfq_class *cl = qfq_find_class(sch, classid);

        if (cl != NULL)
                cl->refcnt++;

        return (unsigned long)cl;
}

static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
{
        struct qfq_class *cl = (struct qfq_class *)arg;

        if (--cl->refcnt == 0)
                qfq_destroy_class(sch, cl);
}

static struct tcf_proto __rcu **qfq_tcf_chain(struct Qdisc *sch,
                                              unsigned long cl)
{
        struct qfq_sched *q = qdisc_priv(sch);

        if (cl)
                return NULL;

        return &q->filter_list;
}

static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
                                  u32 classid)
{
        struct qfq_class *cl = qfq_find_class(sch, classid);

        if (cl != NULL)
                cl->filter_cnt++;

        return (unsigned long)cl;
}

static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct qfq_class *cl = (struct qfq_class *)arg;

        cl->filter_cnt--;
}

static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
                           struct Qdisc *new, struct Qdisc **old)
{
        struct qfq_class *cl = (struct qfq_class *)arg;

        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue,
                                        &pfifo_qdisc_ops, cl->common.classid);
                if (new == NULL)
                        new = &noop_qdisc;
        }

        *old = qdisc_replace(sch, new, &cl->qdisc);
        return 0;
}

static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct qfq_class *cl = (struct qfq_class *)arg;

        return cl->qdisc;
}

static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct qfq_class *cl = (struct qfq_class *)arg;
        struct nlattr *nest;

        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle = cl->common.classid;
        tcm->tcm_info   = cl->qdisc->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
            nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
                goto nla_put_failure;
        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -EMSGSIZE;
}

static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                                struct gnet_dump *d)
{
        struct qfq_class *cl = (struct qfq_class *)arg;
        struct tc_qfq_stats xstats;

        memset(&xstats, 0, sizeof(xstats));

        xstats.weight = cl->agg->class_weight;
        xstats.lmax = cl->agg->lmax;

        if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, NULL,
                                  &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
                return -1;

        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}

static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
        struct tcf_result res;
        struct tcf_proto *fl;
        int result;

        if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
                pr_debug("qfq_classify: found %d\n", skb->priority);
                cl = qfq_find_class(sch, skb->priority);
                if (cl != NULL)
                        return cl;
        }

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        fl = rcu_dereference_bh(q->filter_list);
        result = tc_classify(skb, fl, &res, false);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        /* fall through */
                case TC_ACT_SHOT:
                        return NULL;
                }
#endif
                cl = (struct qfq_class *)res.class;
                if (cl == NULL)
                        cl = qfq_find_class(sch, res.classid);
                return cl;
        }

        return NULL;
}

/* Generic comparison function, handling wraparound. */
static inline int qfq_gt(u64 a, u64 b)
{
        return (s64)(a - b) > 0;
}
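
/*
 * Example of the wraparound-safe comparison (illustrative only): with
 * 64-bit timestamps, qfq_gt(2, ~0ULL) is true, because
 * (s64)(2 - ~0ULL) == 3 > 0. A timestamp that has just wrapped past
 * zero is thus still seen as "greater" than one taken right before
 * the wrap, as required on a circular time axis.
 */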

/* Round a precise timestamp to its slotted value. */
static inline u64 qfq_round_down(u64 ts, unsigned int shift)
{
        return ts & ~((1ULL << shift) - 1);
}

/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
                                        unsigned long bitmap)
{
        int index = __ffs(bitmap);
        return &q->groups[index];
}

/* Calculate a mask to mimic what would be ffs_from(). */
static inline unsigned long mask_from(unsigned long bitmap, int from)
{
        return bitmap & ~((1UL << from) - 1);
}

/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB
 */
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
{
        /* if S > V we are not eligible */
        unsigned int state = qfq_gt(grp->S, q->V);
        unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
        struct qfq_group *next;

        if (mask) {
                next = qfq_ffs(q, mask);
                if (qfq_gt(grp->F, next->F))
                        state |= EB;
        }

        return state;
}
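
/*
 * Informal recap of the encoding used above: eligibility contributes
 * bit 0 (qfq_gt(grp->S, q->V) is 0 or 1, with 1 = ineligible) and
 * blocking contributes bit 1 (EB == 2), so
 *      eligible   + ready   -> 0 = ER
 *      ineligible + ready   -> 1 = IR
 *      eligible   + blocked -> 2 = EB
 *      ineligible + blocked -> 3 = IB
 * which is why the order of enum qfq_state must not change.
 */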


/*
 * In principle
 *      q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *      q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
                                   int src, int dst)
{
        q->bitmaps[dst] |= q->bitmaps[src] & mask;
        q->bitmaps[src] &= ~mask;
}

static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
{
        unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
        struct qfq_group *next;

        if (mask) {
                next = qfq_ffs(q, mask);
                if (!qfq_gt(next->F, old_F))
                        return;
        }

        mask = (1UL << index) - 1;
        qfq_move_groups(q, mask, EB, ER);
        qfq_move_groups(q, mask, IB, IR);
}

/*
 * perhaps
 *
        old_V ^= q->V;
        old_V >>= q->min_slot_shift;
        if (old_V) {
                ...
        }
 *
 */
static void qfq_make_eligible(struct qfq_sched *q)
{
        unsigned long vslot = q->V >> q->min_slot_shift;
        unsigned long old_vslot = q->oldV >> q->min_slot_shift;

        if (vslot != old_vslot) {
                unsigned long mask;
                int last_flip_pos = fls(vslot ^ old_vslot);

                if (last_flip_pos > 31) /* higher than the number of groups */
                        mask = ~0UL;    /* make all groups eligible */
                else
                        mask = (1UL << last_flip_pos) - 1;

                qfq_move_groups(q, mask, IR, ER);
                qfq_move_groups(q, mask, IB, EB);
        }
}

/*
 * The index of the slot in which the input aggregate agg is to be
 * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
 * and not a '-1' because the start time of the group may be moved
 * backward by one slot after the aggregate has been inserted, and
 * this would cause non-empty slots to be right-shifted by one
 * position.
 *
 * QFQ+ fully satisfies this bound to the slot index if the parameters
 * of the classes are not changed dynamically, and if QFQ+ never
 * happens to postpone the service of agg unjustly, i.e., it never
 * happens that the aggregate becomes backlogged and eligible, or just
 * eligible, while an aggregate with a higher approximated finish time
 * is being served. In particular, in this case QFQ+ guarantees that
 * the timestamps of agg are low enough that the slot index is never
 * higher than 2. Unfortunately, QFQ+ cannot provide the same
 * guarantee if it happens to unjustly postpone the service of agg, or
 * if the parameters of some class are changed.
 *
 * As for the first event, i.e., an out-of-order service, the
 * upper bound to the slot index guaranteed by QFQ+ grows to
 * 2 +
 * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
 * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
 *
 * The following function deals with this problem by backward-shifting
 * the timestamps of agg, if needed, so as to guarantee that the slot
 * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
 * cause the service of other aggregates to be postponed, yet the
 * worst-case guarantees of these aggregates are not violated. In
 * fact, in case of no out-of-order service, the timestamps of agg
 * would have been even lower than they are after the backward shift,
 * because QFQ+ would have guaranteed a maximum value equal to 2 for
 * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
 * service is postponed because of the backward-shift would have
 * however waited for the service of agg before being served.
 *
 * The other event that may cause the slot index to be higher than 2
 * for agg is a recent change of the parameters of some class. If the
 * weight of a class is increased or the lmax (max_pkt_size) of the
 * class is decreased, then a new aggregate with smaller slot size
 * than the original parent aggregate of the class may happen to be
 * activated. The activation of this aggregate should be properly
 * delayed to when the service of the class has finished in the ideal
 * system tracked by QFQ+. If the activation of the aggregate is not
 * delayed to this reference time instant, then this aggregate may be
 * unjustly served before other aggregates waiting for service. This
 * may cause the above bound to the slot index to be violated for some
 * of these unlucky aggregates.
 *
 * Instead of delaying the activation of the new aggregate, which is
 * quite complex, the above-discussed capping of the slot index is
 * also used to handle the consequences of a change of the parameters
 * of a class.
 */
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
                            u64 roundedS)
{
        u64 slot = (roundedS - grp->S) >> grp->slot_shift;
        unsigned int i; /* slot index in the bucket list */

        if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
                u64 deltaS = roundedS - grp->S -
                             ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
                agg->S -= deltaS;
                agg->F -= deltaS;
                slot = QFQ_MAX_SLOTS - 2;
        }

        i = (grp->front + slot) % QFQ_MAX_SLOTS;

        hlist_add_head(&agg->next, &grp->slots[i]);
        __set_bit(slot, &grp->full_slots);
}
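
/*
 * Illustrative numbers for the insertion above (assumed, not taken
 * from a real trace): with grp->slot_shift = 25 and
 * roundedS - grp->S = 3 << 25, the aggregate lands in relative slot 3,
 * i.e. bucket (grp->front + 3) % QFQ_MAX_SLOTS. If instead
 * roundedS - grp->S were 40 << 25, slot would exceed
 * QFQ_MAX_SLOTS - 2 = 30, so deltaS = (40 - 30) << 25 is subtracted
 * from agg->S and agg->F and the aggregate is forced into relative
 * slot 30, preserving the bound discussed in the comment before the
 * function.
 */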

/* Maybe introduce hlist_first_entry?? */
static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
{
        return hlist_entry(grp->slots[grp->front].first,
                           struct qfq_aggregate, next);
}

/*
 * remove the entry from the slot
 */
static void qfq_front_slot_remove(struct qfq_group *grp)
{
        struct qfq_aggregate *agg = qfq_slot_head(grp);

        BUG_ON(!agg);
        hlist_del(&agg->next);
        if (hlist_empty(&grp->slots[grp->front]))
                __clear_bit(0, &grp->full_slots);
}

/*
 * Returns the first aggregate in the first non-empty bucket of the
 * group. As a side effect, adjusts the bucket list so the first
 * non-empty bucket is at position 0 in full_slots.
 */
static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
{
        unsigned int i;

        pr_debug("qfq slot_scan: grp %u full %#lx\n",
                 grp->index, grp->full_slots);

        if (grp->full_slots == 0)
                return NULL;

        i = __ffs(grp->full_slots);  /* zero based */
        if (i > 0) {
                grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
                grp->full_slots >>= i;
        }

        return qfq_slot_head(grp);
}

/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time?
 * Here too we should make sure that i is less than 32
 */
static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
{
        unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

        grp->full_slots <<= i;
        grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}

static void qfq_update_eligible(struct qfq_sched *q)
{
        struct qfq_group *grp;
        unsigned long ineligible;

        ineligible = q->bitmaps[IR] | q->bitmaps[IB];
        if (ineligible) {
                if (!q->bitmaps[ER]) {
                        grp = qfq_ffs(q, ineligible);
                        if (qfq_gt(grp->S, q->V))
                                q->V = grp->S;
                }
                qfq_make_eligible(q);
        }
}

/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
static void agg_dequeue(struct qfq_aggregate *agg,
                        struct qfq_class *cl, unsigned int len)
{
        qdisc_dequeue_peeked(cl->qdisc);

        cl->deficit -= (int) len;

        if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
                list_del(&cl->alist);
        else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
                cl->deficit += agg->lmax;
                list_move_tail(&cl->alist, &agg->active);
        }
}
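
/*
 * DRR in one example (hypothetical sizes): a class with lmax = 1500
 * enters the active list with deficit = 1500. Dequeueing a 1000-byte
 * packet leaves deficit = 500; if the next head packet is 1200 bytes,
 * the class cannot send it in this round, so agg_dequeue() adds lmax
 * (deficit becomes 2000) and moves the class to the tail of
 * agg->active, letting the other classes of the aggregate run first.
 */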

static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
                                           struct qfq_class **cl,
                                           unsigned int *len)
{
        struct sk_buff *skb;

        *cl = list_first_entry(&agg->active, struct qfq_class, alist);
        skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
        if (skb == NULL)
                WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
        else
                *len = qdisc_pkt_len(skb);

        return skb;
}

/* Update F according to the actual service received by the aggregate. */
static inline void charge_actual_service(struct qfq_aggregate *agg)
{
        /* Compute the service received by the aggregate, taking into
         * account that, after decreasing the number of classes in
         * agg, it may happen that
         * agg->initial_budget - agg->budget > agg->budgetmax
         */
        u32 service_received = min(agg->budgetmax,
                                   agg->initial_budget - agg->budget);

        agg->F = agg->S + (u64)service_received * agg->inv_w;
}

/* Assign a reasonable start time for a new aggregate in group i.
 * Admissible values for \hat(F) are multiples of \sigma_i
 * no greater than V+\sigma_i. Larger values mean that
 * we had a wraparound so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in EB (see [2]). So, if we have groups in ER,
 * set S to the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
{
        unsigned long mask;
        u64 limit, roundedF;
        int slot_shift = agg->grp->slot_shift;

        roundedF = qfq_round_down(agg->F, slot_shift);
        limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);

        if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
                /* timestamp was stale */
                mask = mask_from(q->bitmaps[ER], agg->grp->index);
                if (mask) {
                        struct qfq_group *next = qfq_ffs(q, mask);
                        if (qfq_gt(roundedF, next->F)) {
                                if (qfq_gt(limit, next->F))
                                        agg->S = next->F;
                                else /* preserve timestamp correctness */
                                        agg->S = limit;
                                return;
                        }
                }
                agg->S = q->V;
        } else  /* timestamp is not stale */
                agg->S = agg->F;
}

/* Update the timestamps of agg before scheduling/rescheduling it for
 * service. In particular, assign to agg->F its maximum possible
 * value, i.e., the virtual finish time with which the aggregate
 * should be labeled if it used all its budget once in service.
 */
static inline void
qfq_update_agg_ts(struct qfq_sched *q,
                  struct qfq_aggregate *agg, enum update_reason reason)
{
        if (reason != requeue)
                qfq_update_start(q, agg);
        else /* just charge agg for the service received */
                agg->S = agg->F;

        agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
}

static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);

static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
        struct qfq_class *cl;
        struct sk_buff *skb = NULL;
        /* next-packet len, 0 means no more active classes in in-service agg */
        unsigned int len = 0;

        if (in_serv_agg == NULL)
                return NULL;

        if (!list_empty(&in_serv_agg->active))
                skb = qfq_peek_skb(in_serv_agg, &cl, &len);

        /*
         * If there are no active classes in the in-service aggregate,
         * or if the aggregate does not have enough budget to serve its
         * next class, then choose the next aggregate to serve.
         */
        if (len == 0 || in_serv_agg->budget < len) {
                charge_actual_service(in_serv_agg);

                /* recharge the budget of the aggregate */
                in_serv_agg->initial_budget = in_serv_agg->budget =
                        in_serv_agg->budgetmax;

                if (!list_empty(&in_serv_agg->active)) {
                        /*
                         * Still active: reschedule for
                         * service. Possible optimization: if no other
                         * aggregate is active, then there is no point
                         * in rescheduling this aggregate, and we can
                         * just keep it as the in-service one. This
                         * should however be a corner case, and to
                         * handle it, we would need to maintain an
                         * extra num_active_aggs field.
                         */
                        qfq_update_agg_ts(q, in_serv_agg, requeue);
                        qfq_schedule_agg(q, in_serv_agg);
                } else if (sch->q.qlen == 0) { /* no aggregate to serve */
                        q->in_serv_agg = NULL;
                        return NULL;
                }

                /*
                 * If we get here, there are other aggregates queued:
                 * choose the new aggregate to serve.
                 */
                in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
                skb = qfq_peek_skb(in_serv_agg, &cl, &len);
        }
        if (!skb)
                return NULL;

        sch->q.qlen--;
        qdisc_bstats_update(sch, skb);

        agg_dequeue(in_serv_agg, cl, len);
        /* If lmax is lowered, through qfq_change_class, for a class
         * owning pending packets with larger size than the new value
         * of lmax, then the following condition may hold.
         */
        if (unlikely(in_serv_agg->budget < len))
                in_serv_agg->budget = 0;
        else
                in_serv_agg->budget -= len;

        q->V += (u64)len * q->iwsum;
        pr_debug("qfq dequeue: len %u F %lld now %lld\n",
                 len, (unsigned long long) in_serv_agg->F,
                 (unsigned long long) q->V);

        return skb;
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
{
        struct qfq_group *grp;
        struct qfq_aggregate *agg, *new_front_agg;
        u64 old_F;

        qfq_update_eligible(q);
        q->oldV = q->V;

        if (!q->bitmaps[ER])
                return NULL;

        grp = qfq_ffs(q, q->bitmaps[ER]);
        old_F = grp->F;

        agg = qfq_slot_head(grp);

        /* agg starts to be served, remove it from schedule */
        qfq_front_slot_remove(grp);

        new_front_agg = qfq_slot_scan(grp);

        if (new_front_agg == NULL) /* group is now inactive, remove from ER */
                __clear_bit(grp->index, &q->bitmaps[ER]);
        else {
                u64 roundedS = qfq_round_down(new_front_agg->S,
                                              grp->slot_shift);
                unsigned int s;

                if (grp->S == roundedS)
                        return agg;
                grp->S = roundedS;
                grp->F = roundedS + (2ULL << grp->slot_shift);
                __clear_bit(grp->index, &q->bitmaps[ER]);
                s = qfq_calc_state(q, grp);
                __set_bit(grp->index, &q->bitmaps[s]);
        }

        qfq_unblock_groups(q, grp->index, old_F);

        return agg;
}

static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
        struct qfq_aggregate *agg;
        int err = 0;

        cl = qfq_classify(skb, sch, &err);
        if (cl == NULL) {
                if (err & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return err;
        }
        pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);

        if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
                pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
                         cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
                err = qfq_change_agg(sch, cl, cl->agg->class_weight,
                                     qdisc_pkt_len(skb));
                if (err)
                        return err;
        }

        err = qdisc_enqueue(skb, cl->qdisc);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                pr_debug("qfq_enqueue: enqueue failed %d\n", err);
                if (net_xmit_drop_count(err)) {
                        cl->qstats.drops++;
                        qdisc_qstats_drop(sch);
                }
                return err;
        }

        bstats_update(&cl->bstats, skb);
        ++sch->q.qlen;

        agg = cl->agg;
        /* if the queue was not empty, then done here */
        if (cl->qdisc->q.qlen != 1) {
                if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
                    list_first_entry(&agg->active, struct qfq_class, alist)
                    == cl && cl->deficit < qdisc_pkt_len(skb))
                        list_move_tail(&cl->alist, &agg->active);

                return err;
        }

        /* schedule class for service within the aggregate */
        cl->deficit = agg->lmax;
        list_add_tail(&cl->alist, &agg->active);

        if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
            q->in_serv_agg == agg)
                return err; /* non-empty or in service, nothing else to do */

        qfq_activate_agg(q, agg, enqueue);

        return err;
}

/*
 * Schedule aggregate according to its timestamps.
 */
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
        struct qfq_group *grp = agg->grp;
        u64 roundedS;
        int s;

        roundedS = qfq_round_down(agg->S, grp->slot_shift);

        /*
         * Insert agg in the correct bucket.
         * If agg->S >= grp->S we don't need to adjust the
         * bucket list and simply go to the insertion phase.
         * Otherwise grp->S is decreasing, we must make room
         * in the bucket list, and also recompute the group state.
         * Finally, if there were no flows in this group and nobody
         * was in ER make sure to adjust V.
         */
        if (grp->full_slots) {
                if (!qfq_gt(grp->S, agg->S))
                        goto skip_update;

                /* create a slot for this agg->S */
                qfq_slot_rotate(grp, roundedS);
                /* group was surely ineligible, remove */
                __clear_bit(grp->index, &q->bitmaps[IR]);
                __clear_bit(grp->index, &q->bitmaps[IB]);
        } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
                   q->in_serv_agg == NULL)
                q->V = roundedS;

        grp->S = roundedS;
        grp->F = roundedS + (2ULL << grp->slot_shift);
        s = qfq_calc_state(q, grp);
        __set_bit(grp->index, &q->bitmaps[s]);

        pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
                 s, q->bitmaps[s],
                 (unsigned long long) agg->S,
                 (unsigned long long) agg->F,
                 (unsigned long long) q->V);

skip_update:
        qfq_slot_insert(grp, agg, roundedS);
}


/* Update agg ts and schedule agg for service */
static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
                             enum update_reason reason)
{
        agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */

        qfq_update_agg_ts(q, agg, reason);
        if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
                q->in_serv_agg = agg; /* start serving this aggregate */
                /* update V: to be in service, agg must be eligible */
                q->oldV = q->V = agg->S;
        } else if (agg != q->in_serv_agg)
                qfq_schedule_agg(q, agg);
}

static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
                            struct qfq_aggregate *agg)
{
        unsigned int i, offset;
        u64 roundedS;

        roundedS = qfq_round_down(agg->S, grp->slot_shift);
        offset = (roundedS - grp->S) >> grp->slot_shift;

        i = (grp->front + offset) % QFQ_MAX_SLOTS;

        hlist_del(&agg->next);
        if (hlist_empty(&grp->slots[i]))
                __clear_bit(offset, &grp->full_slots);
}

/*
 * Called to forcibly deschedule an aggregate. If the aggregate is
 * not in the front bucket, or if the front bucket contains other
 * aggregates, we can simply remove the aggregate with no other
 * side effects.
 * Otherwise we must propagate the event up.
 */
static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
        struct qfq_group *grp = agg->grp;
        unsigned long mask;
        u64 roundedS;
        int s;

        if (agg == q->in_serv_agg) {
                charge_actual_service(agg);
                q->in_serv_agg = qfq_choose_next_agg(q);
                return;
        }

        agg->F = agg->S;
        qfq_slot_remove(q, grp, agg);

        if (!grp->full_slots) {
                __clear_bit(grp->index, &q->bitmaps[IR]);
                __clear_bit(grp->index, &q->bitmaps[EB]);
                __clear_bit(grp->index, &q->bitmaps[IB]);

                if (test_bit(grp->index, &q->bitmaps[ER]) &&
                    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
                        mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
                        if (mask)
                                mask = ~((1UL << __fls(mask)) - 1);
                        else
                                mask = ~0UL;
                        qfq_move_groups(q, mask, EB, ER);
                        qfq_move_groups(q, mask, IB, IR);
                }
                __clear_bit(grp->index, &q->bitmaps[ER]);
        } else if (hlist_empty(&grp->slots[grp->front])) {
                agg = qfq_slot_scan(grp);
                roundedS = qfq_round_down(agg->S, grp->slot_shift);
                if (grp->S != roundedS) {
                        __clear_bit(grp->index, &q->bitmaps[ER]);
                        __clear_bit(grp->index, &q->bitmaps[IR]);
                        __clear_bit(grp->index, &q->bitmaps[EB]);
                        __clear_bit(grp->index, &q->bitmaps[IB]);
                        grp->S = roundedS;
                        grp->F = roundedS + (2ULL << grp->slot_shift);
                        s = qfq_calc_state(q, grp);
                        __set_bit(grp->index, &q->bitmaps[s]);
                }
        }
}

static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl = (struct qfq_class *)arg;

        if (cl->qdisc->q.qlen == 0)
                qfq_deactivate_class(q, cl);
}

static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
                                       struct hlist_head *slot)
{
        struct qfq_aggregate *agg;
        struct qfq_class *cl;
        unsigned int len;

        hlist_for_each_entry(agg, slot, next) {
                list_for_each_entry(cl, &agg->active, alist) {

                        if (!cl->qdisc->ops->drop)
                                continue;

                        len = cl->qdisc->ops->drop(cl->qdisc);
                        if (len > 0) {
                                if (cl->qdisc->q.qlen == 0)
                                        qfq_deactivate_class(q, cl);

                                return len;
                        }
                }
        }
        return 0;
}

static unsigned int qfq_drop(struct Qdisc *sch)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_group *grp;
        unsigned int i, j, len;

        for (i = 0; i <= QFQ_MAX_INDEX; i++) {
                grp = &q->groups[i];
                for (j = 0; j < QFQ_MAX_SLOTS; j++) {
                        len = qfq_drop_from_slot(q, &grp->slots[j]);
                        if (len > 0) {
                                sch->q.qlen--;
                                return len;
                        }
                }
        }

        return 0;
}

static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_group *grp;
        int i, j, err;
        u32 max_cl_shift, maxbudg_shift, max_classes;

        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
                return err;

        if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
                max_classes = QFQ_MAX_AGG_CLASSES;
        else
                max_classes = qdisc_dev(sch)->tx_queue_len + 1;
        /* max_cl_shift = floor(log_2(max_classes)) */
        max_cl_shift = __fls(max_classes);
        q->max_agg_classes = 1<<max_cl_shift;

        /* maxbudg_shift = log2(max_len * max_classes_per_agg) */
        maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
        q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;

        for (i = 0; i <= QFQ_MAX_INDEX; i++) {
                grp = &q->groups[i];
                grp->index = i;
                grp->slot_shift = q->min_slot_shift + i;
                for (j = 0; j < QFQ_MAX_SLOTS; j++)
                        INIT_HLIST_HEAD(&grp->slots[j]);
        }

        INIT_HLIST_HEAD(&q->nonfull_aggs);

        return 0;
}

static void qfq_reset_qdisc(struct Qdisc *sch)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
        unsigned int i;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (cl->qdisc->q.qlen > 0)
                                qfq_deactivate_class(q, cl);

                        qdisc_reset(cl->qdisc);
                }
        }
        sch->q.qlen = 0;
}

static void qfq_destroy_qdisc(struct Qdisc *sch)
{
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
        struct hlist_node *next;
        unsigned int i;

        tcf_destroy_chain(&q->filter_list);

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
                                          common.hnode) {
                        qfq_destroy_class(sch, cl);
                }
        }
        qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops qfq_class_ops = {
        .change         = qfq_change_class,
        .delete         = qfq_delete_class,
        .get            = qfq_get_class,
        .put            = qfq_put_class,
        .tcf_chain      = qfq_tcf_chain,
        .bind_tcf       = qfq_bind_tcf,
        .unbind_tcf     = qfq_unbind_tcf,
        .graft          = qfq_graft_class,
        .leaf           = qfq_class_leaf,
        .qlen_notify    = qfq_qlen_notify,
        .dump           = qfq_dump_class,
        .dump_stats     = qfq_dump_class_stats,
        .walk           = qfq_walk,
};

static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
        .cl_ops         = &qfq_class_ops,
        .id             = "qfq",
        .priv_size      = sizeof(struct qfq_sched),
        .enqueue        = qfq_enqueue,
        .dequeue        = qfq_dequeue,
        .peek           = qdisc_peek_dequeued,
        .drop           = qfq_drop,
        .init           = qfq_init_qdisc,
        .reset          = qfq_reset_qdisc,
        .destroy        = qfq_destroy_qdisc,
        .owner          = THIS_MODULE,
};

static int __init qfq_init(void)
{
        return register_qdisc(&qfq_qdisc_ops);
}

static void __exit qfq_exit(void)
{
        unregister_qdisc(&qfq_qdisc_ops);
}

module_init(qfq_init);
module_exit(qfq_exit);
MODULE_LICENSE("GPL");