/*
 * Fair Queue CoDel discipline
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (internal classifier or external) on flows.
 * This is a stochastic model (as we use a hash, several flows
 * might be hashed on the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO),
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow)
 */
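
/* Example configuration (a sketch using the iproute2 "tc" front end;
 * the parameter values are illustrative, not recommendations):
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		target 5ms interval 100ms quantum 1514 ecn
 *
 * Each keyword maps to one of the TCA_FQ_CODEL_* netlink attributes
 * handled in fq_codel_change() below.
 */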

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */
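
/* A rough size accounting for the 64 byte goal above, assuming 64-bit
 * pointers and the codel_vars layout of this era (an illustration, not
 * something the compiler checks):
 *
 *	head + tail		2 * 8 = 16 bytes
 *	flowchain		2 * 8 = 16 bytes
 *	deficit + dropped	2 * 4 =  8 bytes
 *	cvars			      ~24 bytes
 *				total ~64 bytes, i.e. one cache line
 */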

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		perturbation;	/* hash perturbation */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	u32 hash = skb_get_hash_perturb(skb, q->perturbation);

	return reciprocal_scale(hash, q->flows_cnt);
}
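
/* reciprocal_scale() maps the 32-bit hash onto [0, flows_cnt) without a
 * division, essentially (u32)(((u64)hash * flows_cnt) >> 32); with the
 * default 1024 flows this amounts to taking the top 10 bits of the
 * perturbed hash.
 */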

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
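
/* Illustration (not part of this file): a packet can be pinned to a
 * given flow without any filter by setting skb->priority so that its
 * major number matches this qdisc handle. Assuming the qdisc was
 * installed as handle 8001:, something like
 *
 *	int prio = TC_H_MAKE(0x80010000, 42);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
 *
 * would select the 42nd flow through the fast path at the top of
 * fq_codel_classify() (priority values this large need CAP_NET_ADMIN).
 */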

/* helper functions : might be changed when/if skbs use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	flow->dropped += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog
	 * with a 64 packet limit to avoid too big a cpu spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}
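
/* Worked example of the accounting above (illustrative numbers): if the
 * batch drop removed 17 packets and the fattest flow happens to be the
 * one we just enqueued to (ret == idx), the packet our caller handed us
 * may itself be among the drops; we return NET_XMIT_CN so the parent
 * does not account it, and report only 16 drops to the tree, excluding
 * this packet's length from the backlog delta.
 */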

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}
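
/* Sketch of how the generic CoDel core uses the two hooks above (this
 * mirrors the real call in fq_codel_dequeue() below): codel_dequeue()
 * pulls packets via dequeue_func(), and while a flow's sojourn time has
 * stayed above target for longer than interval, it hands packets to
 * drop_func() and pulls the next one:
 *
 *	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
 *			    &flow->cvars, &q->cstats, qdisc_pkt_len,
 *			    codel_get_enqueue_time, drop_func, dequeue_func);
 */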

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;
	unsigned int prev_backlog;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;
	prev_backlog = sch->qstats.backlog;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}
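
/* Worked DRR example (illustrative numbers): with quantum = 1514, a
 * flow of full-size 1514 byte packets sends one packet per round
 * (deficit 1514 -> 0), while a flow of 64 byte packets sends about 23
 * before its deficit runs out, so both get roughly equal byte shares.
 * A brand new flow starts with one quantum of deficit and is served
 * from new_flows until it is rotated onto old_flows.
 */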

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}
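
/* Note on the (usec * NSEC_PER_USEC) >> CODEL_SHIFT conversions above:
 * with CODEL_SHIFT = 10, one codel time unit is 1024 ns, so e.g. a
 * 5000 us target becomes 5000000 >> 10 = 4882 units, about 5 ms.
 */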

static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}
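
/* This kzalloc-with-vzalloc-fallback is the usual pattern for possibly
 * large tables (up to 65536 flows here): try contiguous pages first,
 * fall back to vmalloc(). It pairs with kvfree() in fq_codel_free()
 * below; later kernels provide kvzalloc() for exactly this.
 */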

static void fq_codel_free(void *addr)
{
	kvfree(addr);
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = fq_codel_change(sch, opt);

		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type			= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
						  unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");