/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/reciprocal_div.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows
   but maintains no flow state. The difference from RED is an additional
   step during the enqueuing process. If the average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and the chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because
   it needs to access packets in the queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth
   Allocation", IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004.

 */

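/* Example (illustrative; parameter names follow tc-choke(8), and the exact
 * syntax depends on the iproute2 version): CHOKe is configured from user
 * space with the same RED-style parameters, e.g.
 *
 *	tc qdisc add dev eth0 parent 1:1 choke limit 1000 \
 *		bandwidth 10mbit ecn
 */
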
/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct tcf_proto *filter_list;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops due to flow match */
	} stats;

	unsigned int	 head;		/* index of oldest packet */
	unsigned int	 tail;		/* index one past newest packet */

	unsigned int	 tab_mask;	/* size - 1, size is a power of two */

	struct sk_buff **tab;		/* circular array of queued packets */
};

/* deliver a random number between 0 and N - 1
 * (reciprocal_divide(X, N) computes ((u64)X * N) >> 32, which maps a
 * uniform 32-bit value into the range [0, N))
 */
static u32 random_N(unsigned int N)
{
	return reciprocal_divide(random32(), N);
}

/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	sch->qstats.backlog -= qdisc_pkt_len(skb);
	qdisc_drop(skb, sch);
	qdisc_tree_decrease_qlen(sch, 1);
	--sch->q.qlen;
}

/*
 * Compare the flows of two packets.
 * Returns true only if source and destination address and port match;
 * false is returned for special cases (e.g. unknown or mismatched
 * protocols).
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	int off1, off2, poff;
	const u32 *ports1, *ports2;
	u8 ip_proto;
	__u32 hash1;

	if (skb1->protocol != skb2->protocol)
		return false;

	/* Use hash value as quick check
	 * Assumes that __skb_get_rxhash makes IP header and ports linear
	 */
	hash1 = skb_get_rxhash(skb1);
	if (!hash1 || hash1 != skb_get_rxhash(skb2))
		return false;

	/* Probably match, but be sure to avoid hash collisions */
	off1 = skb_network_offset(skb1);
	off2 = skb_network_offset(skb2);

	switch (skb1->protocol) {
	case __constant_htons(ETH_P_IP): {
		const struct iphdr *ip1, *ip2;

		ip1 = (const struct iphdr *) (skb1->data + off1);
		ip2 = (const struct iphdr *) (skb2->data + off2);

		ip_proto = ip1->protocol;
		if (ip_proto != ip2->protocol ||
		    ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
			return false;

		/* Do not compare ports for fragmented packets */
		if ((ip1->frag_off | ip2->frag_off) & htons(IP_MF | IP_OFFSET))
			ip_proto = 0;
		off1 += ip1->ihl * 4;
		off2 += ip2->ihl * 4;
		break;
	}

	case __constant_htons(ETH_P_IPV6): {
		const struct ipv6hdr *ip1, *ip2;

		ip1 = (const struct ipv6hdr *) (skb1->data + off1);
		ip2 = (const struct ipv6hdr *) (skb2->data + off2);

		ip_proto = ip1->nexthdr;
		if (ip_proto != ip2->nexthdr ||
		    ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
		    ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
			return false;
		off1 += 40;
		off2 += 40;
		break;
	}

	default: /* Maybe compare MAC header here? */
		return false;
	}

	poff = proto_ports_offset(ip_proto);
	if (poff < 0)
		return true;

	off1 += poff;
	off2 += poff;

	ports1 = (__force u32 *)(skb1->data + off1);
	ports2 = (__force u32 *)(skb2->data + off2);
	return *ports1 == *ports2;
}

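/* The classid chosen by external filters is stashed in the skb's qdisc
 * control block, so choke_match_random() can compare two packets without
 * re-running the filter chain.
 */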
static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
	*(unsigned int *)(qdisc_skb_cb(skb)->data) = classid;
}

static u16 choke_get_classid(const struct sk_buff *skb)
{
	return *(unsigned int *)(qdisc_skb_cb(skb)->data);
}

/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
			   struct Qdisc *sch, int *qerr)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return false;
		}
#endif
		choke_set_classid(skb, TC_H_MIN(res.classid));
		return true;
	}

	return false;
}

/*
 * Select a packet at random from the queue.
 * HACK: since the queue can have holes from previous deletions, retry
 * several times to find a random skb, then just give up and return the
 * head. Will return NULL if the queue is empty (q->head == q->tail).
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retries = 3;

	do {
		*pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retries > 0);

	return q->tab[*pidx = q->head];
}

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	if (q->filter_list)
		return choke_get_classid(nskb) == choke_get_classid(oskb);

	return choke_match_flow(oskb, nskb);
}

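/*
 * The enqueue path below implements the CHOKe decision:
 *   qavg <= qth_min                -> admit
 *   random victim from same flow   -> drop both packets
 *   qavg > qth_max                 -> forced mark/drop
 *   otherwise                      -> RED-style probabilistic mark/drop
 */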
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct red_parms *p = &q->parms;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	/* Compute average queue usage (see RED) */
	p->qavg = red_calc_qavg(p, sch->q.qlen);
	if (red_is_idling(p))
		red_end_of_idle_period(p);

	/* Is queue small? */
	if (p->qavg <= p->qth_min)
		p->qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (p->qavg > p->qth_max) {
			p->qcount = -1;

			sch->qstats.overlimits++;
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++p->qcount) {
			if (red_mark_probability(p, p->qavg)) {
				p->qcount = 0;
				p->qR = red_random(p);

				sch->qstats.overlimits++;
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			p->qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		sch->qstats.backlog += qdisc_pkt_len(skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	sch->qstats.drops++;
	kfree_skb(skb);
	return NET_XMIT_DROP;

 congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;

 other_drop:
	if (ret & __NET_XMIT_BYPASS)
		sch->qstats.drops++;
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->parms))
			red_start_of_idle_period(&q->parms);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	sch->qstats.backlog -= qdisc_pkt_len(skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static unsigned int choke_drop(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);
	if (len > 0)
		q->stats.other++;
	else {
		if (!red_is_idling(&q->parms))
			red_start_of_idle_period(&q->parms);
	}

	return len;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	red_restart(&q->parms);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
};

static void choke_free(void *addr)
{
	if (addr) {
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

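	/* The table size must be a power of two so that "x & tab_mask"
	 * wraps ring indexes; the extra slot (limit + 1) keeps
	 * head != tail while the queue holds a full 'limit' packets.
	 */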
	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;

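			/* Copy packets from the old ring to the new one,
			 * skipping holes and dropping any packets that no
			 * longer fit.
			 */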
			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				sch->qstats.backlog -= qdisc_pkt_len(skb);
				--sch->q.qlen;
				qdisc_drop(skb, sch);
			}
			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]));

	if (q->head == q->tail)
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
	return choke_change(sch, opt);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	choke_free(q->tab);
}

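/*
 * Token class interface: just enough to let TC filters be attached in
 * place of the builtin flow classifier (see the header comment above).
 */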
static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long choke_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static void choke_put(struct Qdisc *q, unsigned long cl)
{
}

static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
				u32 classid)
{
	return 0;
}

static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	if (!arg->stop) {
		if (arg->fn(sch, 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops choke_class_ops = {
	.leaf		=	choke_leaf,
	.get		=	choke_get,
	.put		=	choke_put,
	.tcf_chain	=	choke_find_tcf,
	.bind_tcf	=	choke_bind,
	.unbind_tcf	=	choke_put,
	.dump		=	choke_dump_class,
	.walk		=	choke_walk,
};

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),
	.cl_ops		=	&choke_class_ops,

	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.drop		=	choke_drop,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");