/*
 * Fair Queue CoDel discipline
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/flow_keys.h>
#include <net/codel.h>

/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified on flows (by the internal classifier, or an
 * external one). This is a stochastic model: since a hash is used,
 * several flows might be hashed to the same slot.
 * Each flow has a CoDel-managed queue.
 * Flows are linked onto two round-robin lists, so that new flows have
 * priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO and
 * drops only at the head).
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

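/* Illustrative user-space setup (iproute2 syntax; the values shown are
 * this qdisc's defaults, and the device name is just an example):
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		target 5ms interval 100ms ecn
 */
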
struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		perturbation;	/* hash perturbation */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

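/* Stochastic flow hashing: mix the dissected flow keys with a per-qdisc
 * random perturbation, then let reciprocal_scale() map the 32bit hash
 * into [0, flows_cnt) using a multiply/shift instead of a modulus.
 */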
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  const struct sk_buff *skb)
{
	struct flow_keys keys;
	unsigned int hash;

	skb_flow_dissect(skb, &keys);
	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src ^ keys.ip_proto,
			    (__force u32)keys.ports, q->perturbation);

	return reciprocal_scale(hash, q->flows_cnt);
}

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, filter, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
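			/* fall through */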
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* helper functions: might be changed when/if skbs use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

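/* Used both on enqueue overflow and as the generic ->drop() callback:
 * evict one packet from the currently fattest flow.
 */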
static unsigned int fq_codel_drop(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;

	/* Queue is full! Find the fat flow and drop a packet from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in the fast path (packet enqueue/dequeue) with many cache misses.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}
	flow = &q->flows[idx];
	skb = dequeue_head(flow);
	len = qdisc_pkt_len(skb);
	q->backlogs[idx] -= len;
	/* Update the backlog stat before freeing the skb:
	 * qdisc_qstats_backlog_dec() reads its packet length.
	 */
	qdisc_qstats_backlog_dec(sch, skb);
	kfree_skb(skb);
	sch->q.qlen--;
	qdisc_qstats_drop(sch);
	flow->dropped++;
	return idx;
}

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
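	/* fq_codel_classify() returns a 1-based bucket id (0 means drop),
	 * while q->flows[] is 0-based.
	 */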
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (fq_codel_drop(sch) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let the upper stack know about it */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled by
 * codel; we don't need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		sch->q.qlen--;
	}
	return skb;
}

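/* Deficit Round Robin over the two lists: new flows are served first; a
 * flow that exhausts its byte deficit gets one more quantum and moves to
 * the tail of old_flows. An emptied flow on new_flows is also moved to
 * old_flows (rather than deleted) so it cannot immediately regain "new"
 * status and starve old flows.
 */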
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;

	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
			    dequeue);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
	 * or HTB crashes. Defer it for the next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
		q->cstats.drop_count = 0;
	}
	return skb;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = fq_codel_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

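	/* tc passes target/interval in usec; codel keeps times in units of
	 * (1 << CODEL_SHIFT) ns, hence the conversions below.
	 */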
	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
	q->cstats.drop_count = 0;

	sch_tree_unlock(sch);
	return 0;
}

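/* flows_cnt may be as large as 65536, so the tables can exceed what
 * kmalloc() reliably provides; fall back to vmalloc() in that case.
 */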
static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

static void fq_codel_free(void *addr)
{
	kvfree(addr);
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams, sch);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;

	if (opt) {
		int err = fq_codel_change(sch, opt);

		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
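	/* With a sane limit, let the core bypass this qdisc entirely
	 * while its queue is empty (packets go straight to the device).
	 */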
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type			= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;

	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

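/* Class ops: each hash bucket is exposed as a pseudo class (1..flows_cnt)
 * so that tc filters can target buckets and per-bucket stats can be
 * dumped; there are no child qdiscs to graft.
 */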
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* once a filter is bound, we can no longer bypass the qdisc */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
						  unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb = flow->head;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		while (skb) {
			qs.qlen++;
			skb = skb->next;
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	fq_codel_drop,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");