/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/reciprocal_div.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_keys.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
   maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004.

 */
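
/*
 * Example (illustrative, not part of this file): a CHOKe qdisc is normally
 * configured from userspace via iproute2, along the lines of
 *
 *      tc qdisc add dev eth0 root choke limit 1000 bandwidth 10mbit ecn
 *
 * Option names follow tc-choke(8); the device name and numbers here are
 * placeholders.
 */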

/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* Parameters */
        u32              limit;
        unsigned char    flags;

        struct red_parms parms;

/* Variables */
        struct tcf_proto *filter_list;
        struct {
                u32     prob_drop;      /* Early probability drops */
                u32     prob_mark;      /* Early probability marks */
                u32     forced_drop;    /* Forced drops, qavg > max_thresh */
                u32     forced_mark;    /* Forced marks, qavg > max_thresh */
                u32     pdrop;          /* Drops due to queue limits */
                u32     other;          /* Drops due to drop() calls */
                u32     matched;        /* Drops due to flow match */
        } stats;

        unsigned int     head;
        unsigned int     tail;

        unsigned int     tab_mask; /* size - 1 */

        struct sk_buff **tab;
};

/* deliver a random number between 0 and N - 1 */
static u32 random_N(unsigned int N)
{
        return reciprocal_divide(random32(), N);
}
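
/*
 * reciprocal_divide(x, N) computes (u32)(((u64)x * N) >> 32), so feeding it
 * a uniform 32-bit random value yields an (almost) uniform result in [0, N)
 * without an expensive division.  Worked example: with N = 5 and
 * random32() = 0x80000000, the result is (0x80000000 * 5) >> 32 = 2.
 */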

/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
        return (q->tail - q->head) & q->tab_mask;
}
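
/*
 * Worked example of the ring arithmetic above: with tab_mask = 7 (an
 * 8-slot table), head = 6 and tail = 2, (2 - 6) & 7 = 4 slots are in use,
 * holes included.
 */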

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
        return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
        return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
        do {
                q->head = (q->head + 1) & q->tab_mask;
                if (q->head == q->tail)
                        break;
        } while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
        do {
                q->tail = (q->tail - 1) & q->tab_mask;
                if (q->head == q->tail)
                        break;
        } while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb = q->tab[idx];

        q->tab[idx] = NULL;

        if (idx == q->head)
                choke_zap_head_holes(q);
        if (idx == q->tail)
                choke_zap_tail_holes(q);

        sch->qstats.backlog -= qdisc_pkt_len(skb);
        qdisc_drop(skb, sch);
        qdisc_tree_decrease_qlen(sch, 1);
        --sch->q.qlen;
}
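
/*
 * Illustration: with tab = [A, NULL, B], head = 0 and a drop of A
 * (idx == head), choke_zap_head_holes() advances head past both the fresh
 * hole and the stale one, leaving head at B's slot (index 2).
 */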
141
Eric Dumazet26f70e12011-02-24 17:45:41 +0000142struct choke_skb_cb {
Eric Dumazet2bcc34b2011-11-29 04:22:15 +0000143 u16 classid;
144 u8 keys_valid;
145 struct flow_keys keys;
Eric Dumazet26f70e12011-02-24 17:45:41 +0000146};
147
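/*
 * Per-packet private state is kept in the skb control block, in the area
 * following the generic struct qdisc_skb_cb.  The BUILD_BUG_ON below
 * ensures this combined state still fits in the 48-byte skb->cb[].
 */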
static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(skb->cb) <
                sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
        return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
        choke_skb_cb(skb)->classid = classid;
}

static u16 choke_get_classid(const struct sk_buff *skb)
{
        return choke_skb_cb(skb)->classid;
}

/*
 * Compare flows of two packets.
 * Returns true only if source and destination addresses and ports match;
 * false for special cases.
 */
static bool choke_match_flow(struct sk_buff *skb1,
                             struct sk_buff *skb2)
{
        if (skb1->protocol != skb2->protocol)
                return false;

        if (!choke_skb_cb(skb1)->keys_valid) {
                choke_skb_cb(skb1)->keys_valid = 1;
                skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
        }

        if (!choke_skb_cb(skb2)->keys_valid) {
                choke_skb_cb(skb2)->keys_valid = 1;
                skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
        }

        return !memcmp(&choke_skb_cb(skb1)->keys,
                       &choke_skb_cb(skb2)->keys,
                       sizeof(struct flow_keys));
}
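
/*
 * Note: in this kernel, struct flow_keys as filled by skb_flow_dissect()
 * carries the source/destination addresses, the port pair and the IP
 * protocol, so byte-wise equality above amounts to a 5-tuple flow match.
 */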

/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. use TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
                           struct Qdisc *sch, int *qerr)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct tcf_result res;
        int result;

        result = tc_classify(skb, q->filter_list, &res);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        /* fall through */
                case TC_ACT_SHOT:
                        return false;
                }
#endif
                choke_set_classid(skb, TC_H_MIN(res.classid));
                return true;
        }

        return false;
}

/*
 * Select a packet at random from queue.
 * HACK: since the queue can have holes from previous deletions, retry a few
 * times to find a random skb, but then just give up and return the head.
 * Will return NULL if queue is empty (q->head == q->tail).
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
                                         unsigned int *pidx)
{
        struct sk_buff *skb;
        int retries = 3;

        do {
                *pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
                skb = q->tab[*pidx];
                if (skb)
                        return skb;
        } while (--retries > 0);

        return q->tab[*pidx = q->head];
}
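
/*
 * Falling back to the head after three misses biases selection slightly
 * toward the oldest packet when the table is sparse; for CHOKe's
 * probabilistic comparison this is an acceptable trade-off against
 * scanning the whole table.
 */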

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
                               struct sk_buff *nskb,
                               unsigned int *pidx)
{
        struct sk_buff *oskb;

        if (q->head == q->tail)
                return false;

        oskb = choke_peek_random(q, pidx);
        if (q->filter_list)
                return choke_get_classid(nskb) == choke_get_classid(oskb);

        return choke_match_flow(oskb, nskb);
}

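/*
 * Enqueue decision ladder (RED plus the CHOKe step):
 *  1. qavg <= qth_min: no AQM action, admit subject to the queue limit;
 *  2. qavg > qth_min: first draw a random queued packet and drop both it
 *     and the new packet on a flow match;
 *  3. qavg > qth_max: unconditionally mark (ECN) or drop;
 *  4. otherwise: mark/drop with RED's computed probability.
 */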
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct red_parms *p = &q->parms;
        int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

        if (q->filter_list) {
                /* If using external classifiers, get result and record it. */
                if (!choke_classify(skb, sch, &ret))
                        goto other_drop;        /* Packet was eaten by filter */
        }

        choke_skb_cb(skb)->keys_valid = 0;
        /* Compute average queue usage (see RED) */
        p->qavg = red_calc_qavg(p, sch->q.qlen);
        if (red_is_idling(p))
                red_end_of_idle_period(p);

        /* Is queue small? */
        if (p->qavg <= p->qth_min)
                p->qcount = -1;
        else {
                unsigned int idx;

                /* Draw a packet at random from queue and compare flow */
                if (choke_match_random(q, skb, &idx)) {
                        q->stats.matched++;
                        choke_drop_by_idx(sch, idx);
                        goto congestion_drop;
                }

                /* Queue is large, always mark/drop */
                if (p->qavg > p->qth_max) {
                        p->qcount = -1;

                        sch->qstats.overlimits++;
                        if (use_harddrop(q) || !use_ecn(q) ||
                            !INET_ECN_set_ce(skb)) {
                                q->stats.forced_drop++;
                                goto congestion_drop;
                        }

                        q->stats.forced_mark++;
                } else if (++p->qcount) {
                        if (red_mark_probability(p, p->qavg)) {
                                p->qcount = 0;
                                p->qR = red_random(p);

                                sch->qstats.overlimits++;
                                if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
                                        q->stats.prob_drop++;
                                        goto congestion_drop;
                                }

                                q->stats.prob_mark++;
                        }
                } else
                        p->qR = red_random(p);
        }

        /* Admit new packet */
        if (sch->q.qlen < q->limit) {
                q->tab[q->tail] = skb;
                q->tail = (q->tail + 1) & q->tab_mask;
                ++sch->q.qlen;
                sch->qstats.backlog += qdisc_pkt_len(skb);
                return NET_XMIT_SUCCESS;
        }

        q->stats.pdrop++;
        sch->qstats.drops++;
        kfree_skb(skb);
        return NET_XMIT_DROP;

congestion_drop:
        qdisc_drop(skb, sch);
        return NET_XMIT_CN;

other_drop:
        if (ret & __NET_XMIT_BYPASS)
                sch->qstats.drops++;
        kfree_skb(skb);
        return ret;
}

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;

        if (q->head == q->tail) {
                if (!red_is_idling(&q->parms))
                        red_start_of_idle_period(&q->parms);
                return NULL;
        }

        skb = q->tab[q->head];
        q->tab[q->head] = NULL;
        choke_zap_head_holes(q);
        --sch->q.qlen;
        sch->qstats.backlog -= qdisc_pkt_len(skb);
        qdisc_bstats_update(sch, skb);

        return skb;
}

static unsigned int choke_drop(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        unsigned int len;

        len = qdisc_queue_drop(sch);
        if (len > 0)
                q->stats.other++;
        else {
                if (!red_is_idling(&q->parms))
                        red_start_of_idle_period(&q->parms);
        }

        return len;
}

static void choke_reset(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);

        red_restart(&q->parms);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
        [TCA_CHOKE_PARMS]       = { .len = sizeof(struct tc_red_qopt) },
        [TCA_CHOKE_STAB]        = { .len = RED_STAB_SIZE },
        [TCA_CHOKE_MAX_P]       = { .type = NLA_U32 },
};


static void choke_free(void *addr)
{
        if (addr) {
                if (is_vmalloc_addr(addr))
                        vfree(addr);
                else
                        kfree(addr);
        }
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CHOKE_MAX + 1];
        const struct tc_red_qopt *ctl;
        int err;
        struct sk_buff **old = NULL;
        unsigned int mask;
        u32 max_P;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
        if (err < 0)
                return err;

        if (tb[TCA_CHOKE_PARMS] == NULL ||
            tb[TCA_CHOKE_STAB] == NULL)
                return -EINVAL;

        max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

        ctl = nla_data(tb[TCA_CHOKE_PARMS]);

        if (ctl->limit > CHOKE_MAX_QUEUE)
                return -EINVAL;

        mask = roundup_pow_of_two(ctl->limit + 1) - 1;
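        /*
         * Example: limit = 1000 gives roundup_pow_of_two(1001) - 1 = 1023
         * (0x3ff), i.e. a 1024-slot table.  A power-of-two size lets head
         * and tail wrap with a cheap AND instead of a modulo.
         */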
        if (mask != q->tab_mask) {
                struct sk_buff **ntab;

                ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
                if (!ntab)
                        ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
                if (!ntab)
                        return -ENOMEM;

                sch_tree_lock(sch);
                old = q->tab;
                if (old) {
                        unsigned int oqlen = sch->q.qlen, tail = 0;

                        while (q->head != q->tail) {
                                struct sk_buff *skb = q->tab[q->head];

                                q->head = (q->head + 1) & q->tab_mask;
                                if (!skb)
                                        continue;
                                if (tail < mask) {
                                        ntab[tail++] = skb;
                                        continue;
                                }
                                sch->qstats.backlog -= qdisc_pkt_len(skb);
                                --sch->q.qlen;
                                qdisc_drop(skb, sch);
                        }
                        qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
                        q->head = 0;
                        q->tail = tail;
                }

                q->tab_mask = mask;
                q->tab = ntab;
        } else
                sch_tree_lock(sch);

        q->flags = ctl->flags;
        q->limit = ctl->limit;

        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
                      nla_data(tb[TCA_CHOKE_STAB]),
                      max_P);

        if (q->head == q->tail)
                red_end_of_idle_period(&q->parms);

        sch_tree_unlock(sch);
        choke_free(old);
        return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
        return choke_change(sch, opt);
}

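/*
 * struct red_parms keeps qth_min/qth_max pre-shifted left by Wlog to match
 * the scaled average queue length, so choke_dump() below shifts them back
 * before reporting to userspace.
 */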
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts = NULL;
        struct tc_red_qopt opt = {
                .limit          = q->limit,
                .flags          = q->flags,
                .qth_min        = q->parms.qth_min >> q->parms.Wlog,
                .qth_max        = q->parms.qth_max >> q->parms.Wlog,
                .Wlog           = q->parms.Wlog,
                .Plog           = q->parms.Plog,
                .Scell_log      = q->parms.Scell_log,
        };

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
        NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P);
        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct tc_choke_xstats st = {
                .early  = q->stats.prob_drop + q->stats.forced_drop,
                .marked = q->stats.prob_mark + q->stats.forced_mark,
                .pdrop  = q->stats.pdrop,
                .other  = q->stats.other,
                .matched = q->stats.matched,
        };

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);

        tcf_destroy_chain(&q->filter_list);
        choke_free(q->tab);
}

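/*
 * Minimal class interface: CHOKe is not really classful.  These stubs
 * exist only so that tc filters can be attached (via choke_find_tcf) to
 * override the builtin flow classifier.
 */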
static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long choke_get(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static void choke_put(struct Qdisc *q, unsigned long cl)
{
}

static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
                                u32 classid)
{
        return 0;
}

static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
{
        struct choke_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return &q->filter_list;
}

static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
                            struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        if (!arg->stop) {
                if (arg->fn(sch, 1, arg) < 0) {
                        arg->stop = 1;
                        return;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops choke_class_ops = {
        .leaf           =       choke_leaf,
        .get            =       choke_get,
        .put            =       choke_put,
        .tcf_chain      =       choke_find_tcf,
        .bind_tcf       =       choke_bind,
        .unbind_tcf     =       choke_put,
        .dump           =       choke_dump_class,
        .walk           =       choke_walk,
};

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);

        return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
        .id             =       "choke",
        .priv_size      =       sizeof(struct choke_sched_data),

        .enqueue        =       choke_enqueue,
        .dequeue        =       choke_dequeue,
        .peek           =       choke_peek_head,
        .drop           =       choke_drop,
        .init           =       choke_init,
        .destroy        =       choke_destroy,
        .reset          =       choke_reset,
        .change         =       choke_change,
        .dump           =       choke_dump,
        .dump_stats     =       choke_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init choke_module_init(void)
{
        return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
        unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");