/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/reciprocal_div.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
   maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004

 */

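/*
 * Example configuration (illustrative only; syntax follows tc-choke(8) and
 * the exact parameter set depends on the iproute2 version in use):
 *
 *   tc qdisc add dev eth0 root choke limit 1000 bandwidth 10mbit ecn
 *
 * RED-style min/max thresholds, avpkt and burst may also be given explicitly.
 */
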
/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct tcf_proto *filter_list;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops due to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;

	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};

/* deliver a random number between 0 and N - 1 */
static u32 random_N(unsigned int N)
{
	return reciprocal_divide(random32(), N);
}

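/*
 * Note: random_N() above relies on the current reciprocal_divide()
 * implementation, reciprocal_divide(A, B) == ((u64)A * B) >> 32, so it
 * scales a 32-bit random value into [0, N) without a division.
 */
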
/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}

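/*
 * The table is a power-of-two ring of skb pointers, so tab_mask = size - 1
 * and index arithmetic can simply be masked.  For example, with size 8,
 * head = 6 and tail = 2: (2 - 6) & 7 = 4 slots in use (holes included).
 */
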
/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	sch->qstats.backlog -= qdisc_pkt_len(skb);
	qdisc_drop(skb, sch);
	qdisc_tree_decrease_qlen(sch, 1);
	--sch->q.qlen;
}

/*
 * Compare flow of two packets
 * Returns true only if source and destination address and port match.
 * false for special cases
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	int off1, off2, poff;
	const u32 *ports1, *ports2;
	u32 _ports1, _ports2;
	u8 ip_proto;
	__u32 hash1;

	if (skb1->protocol != skb2->protocol)
		return false;

	/* Use rxhash value as quick check */
	hash1 = skb_get_rxhash(skb1);
	if (!hash1 || hash1 != skb_get_rxhash(skb2))
		return false;

	/* Probably match, but be sure to avoid hash collisions */
	off1 = skb_network_offset(skb1);
	off2 = skb_network_offset(skb2);

	switch (skb1->protocol) {
	case __constant_htons(ETH_P_IP): {
		const struct iphdr *ip1, *ip2;
		struct iphdr _ip1, _ip2;

		ip1 = skb_header_pointer(skb1, off1, sizeof(_ip1), &_ip1);
		ip2 = skb_header_pointer(skb2, off2, sizeof(_ip2), &_ip2);
		if (!ip1 || !ip2)
			return false;
		ip_proto = ip1->protocol;
		if (ip_proto != ip2->protocol ||
		    ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
			return false;

		if (ip_is_fragment(ip1) | ip_is_fragment(ip2))
			ip_proto = 0;
		off1 += ip1->ihl * 4;
		off2 += ip2->ihl * 4;
		break;
	}

	case __constant_htons(ETH_P_IPV6): {
		const struct ipv6hdr *ip1, *ip2;
		struct ipv6hdr _ip1, _ip2;

		ip1 = skb_header_pointer(skb1, off1, sizeof(_ip1), &_ip1);
		ip2 = skb_header_pointer(skb2, off2, sizeof(_ip2), &_ip2);
		if (!ip1 || !ip2)
			return false;

		ip_proto = ip1->nexthdr;
		if (ip_proto != ip2->nexthdr ||
		    ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
		    ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
			return false;
		off1 += 40;
		off2 += 40;
		break;
	}

	default: /* Maybe compare MAC header here? */
		return false;
	}

	poff = proto_ports_offset(ip_proto);
	if (poff < 0)
		return true;

	off1 += poff;
	off2 += poff;

	ports1 = skb_header_pointer(skb1, off1, sizeof(_ports1), &_ports1);
	ports2 = skb_header_pointer(skb2, off2, sizeof(_ports2), &_ports2);
	if (!ports1 || !ports2)
		return false;

	return *ports1 == *ports2;
}

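/*
 * The classid chosen by choke_classify() is stashed in the skb control
 * block, in the private area that follows struct qdisc_skb_cb; the
 * BUILD_BUG_ON() below guarantees it fits.
 */
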
struct choke_skb_cb {
	u16 classid;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
	choke_skb_cb(skb)->classid = classid;
}

static u16 choke_get_classid(const struct sk_buff *skb)
{
	return choke_skb_cb(skb)->classid;
}

/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. use TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
			   struct Qdisc *sch, int *qerr)

{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		choke_set_classid(skb, TC_H_MIN(res.classid));
		return true;
	}

	return false;
}

/*
 * Select a packet at random from queue
 * HACK: since the queue can have holes from previous deletions, retry several
 * times to find a random skb but then just give up and return the head.
 * Will return NULL if queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	if (q->filter_list)
		return choke_get_classid(nskb) == choke_get_classid(oskb);

	return choke_match_flow(oskb, nskb);
}

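/*
 * Enqueue follows RED with one extra step: once the average queue size
 * exceeds qth_min, the new packet is compared against a randomly chosen
 * queued packet and both are dropped on a flow match.  Otherwise the usual
 * RED regions apply: below qth_min always admit, between qth_min and
 * qth_max mark/drop probabilistically, above qth_max mark/drop always.
 */
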
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct red_parms *p = &q->parms;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	/* Compute average queue usage (see RED) */
	p->qavg = red_calc_qavg(p, sch->q.qlen);
	if (red_is_idling(p))
		red_end_of_idle_period(p);

	/* Is queue small? */
	if (p->qavg <= p->qth_min)
		p->qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (p->qavg > p->qth_max) {
			p->qcount = -1;

			sch->qstats.overlimits++;
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++p->qcount) {
			if (red_mark_probability(p, p->qavg)) {
				p->qcount = 0;
				p->qR = red_random(p);

				sch->qstats.overlimits++;
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			p->qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		sch->qstats.backlog += qdisc_pkt_len(skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	sch->qstats.drops++;
	kfree_skb(skb);
	return NET_XMIT_DROP;

 congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;

 other_drop:
	if (ret & __NET_XMIT_BYPASS)
		sch->qstats.drops++;
	kfree_skb(skb);
	return ret;
}

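/*
 * Dequeue from the head of the ring, skipping any holes left by earlier
 * CHOKe drops.  When the queue drains completely, tell RED that an idle
 * period starts so the average queue size decays correctly.
 */
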
static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->parms))
			red_start_of_idle_period(&q->parms);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	sch->qstats.backlog -= qdisc_pkt_len(skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static unsigned int choke_drop(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);
	if (len > 0)
		q->stats.other++;
	else {
		if (!red_is_idling(&q->parms))
			red_start_of_idle_period(&q->parms);
	}

	return len;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	red_restart(&q->parms);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
};

static void choke_free(void *addr)
{
	if (addr) {
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
}

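/*
 * (Re)configure the qdisc.  The packet table is sized to the next power of
 * two above limit + 1 (kcalloc first, vzalloc as a fallback for large
 * tables); on resize, queued packets are migrated into the new ring and
 * any overflow is dropped.
 */
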
static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				sch->qstats.backlog -= qdisc_pkt_len(skb);
				--sch->q.qlen;
				qdisc_drop(skb, sch);
			}
			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]));

	if (q->head == q->tail)
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
	return choke_change(sch, opt);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	choke_free(q->tab);
}

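/*
 * CHOKe has no real classes; the stub class operations below exist only so
 * that tc filters can be attached to override the builtin flow classifier
 * (see the comment at the top of this file).
 */
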
static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long choke_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static void choke_put(struct Qdisc *q, unsigned long cl)
{
}

static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
				u32 classid)
{
	return 0;
}

static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	if (!arg->stop) {
		if (arg->fn(sch, 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops choke_class_ops = {
	.leaf		=	choke_leaf,
	.get		=	choke_get,
	.put		=	choke_put,
	.tcf_chain	=	choke_find_tcf,
	.bind_tcf	=	choke_bind,
	.unbind_tcf	=	choke_put,
	.dump		=	choke_dump_class,
	.walk		=	choke_walk,
};

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),

	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.drop		=	choke_drop,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");