/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows
   but maintains no flow state. The difference from RED is an additional
   step during enqueue: if the average queue size exceeds the minimum
   threshold (qth_min), a packet is chosen at random from the queue. If
   both the new and the chosen packet belong to the same flow, both are
   dropped. Unlike RED, CHOKe is not really a "classful" qdisc, because it
   needs to access packets in the queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth
   Allocation", IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004.

 */
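
/*
 * Example configuration (illustrative only; values are arbitrary and the
 * authoritative syntax is tc-choke(8), not this comment):
 *
 *	tc qdisc add dev eth0 parent 1:1 handle 10: choke limit 1000 \
 *		bandwidth 10mbit avpkt 1000 min 83 max 250 burst 138 ecn
 *
 * Note that, unlike RED, the limit/min/max thresholds for choke are
 * expressed in packets rather than bytes.
 */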

/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct red_vars  vars;
	struct tcf_proto __rcu *filter_list;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops due to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;

	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};

/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}
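
/*
 * Example (illustrative): with tab_mask = 7, head = 6 and tail = 2,
 * (2 - 6) & 7 = 4, i.e. slots 6, 7, 0 and 1 are accounted for; the
 * unsigned arithmetic handles the ring-buffer wrap-around.
 */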

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}
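
/*
 * Example (illustrative): tab[0..3] = { NULL, NULL, NULL, B }, head = 0,
 * tail = 4, immediately after the skb at index 0 was dropped:
 * choke_zap_head_holes() steps head to 1, then 2, and stops at 3,
 * where a packet remains.
 */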

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
	qdisc_drop(skb, sch);
	--sch->q.qlen;
}

struct choke_skb_cb {
	u16			classid;
	u8			keys_valid;
	struct flow_keys_digest	keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
	choke_skb_cb(skb)->classid = classid;
}

static u16 choke_get_classid(const struct sk_buff *skb)
{
	return choke_skb_cb(skb)->classid;
}

/*
 * Compare the flows of two packets.
 * Returns true only if protocol, source and destination addresses and
 * ports all match; false for special cases (e.g. differing protocols).
 * Dissected flow keys are cached in the skb cb, so each packet is
 * dissected at most once.
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}

/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
			   struct Qdisc *sch, int *qerr)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	fl = rcu_dereference_bh(q->filter_list);
	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
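			/* fall through */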
		case TC_ACT_SHOT:
			return false;
		}
#endif
		choke_set_classid(skb, TC_H_MIN(res.classid));
		return true;
	}

	return false;
}

/*
 * Select a packet at random from queue
 * HACK: since the queue can have holes from previous deletions, retry
 * several times to find a random skb, then just give up and return the
 * head. Will return NULL if queue is empty (q->head == q->tail).
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	if (rcu_access_pointer(q->filter_list))
		return choke_get_classid(nskb) == choke_get_classid(oskb);

	return choke_match_flow(oskb, nskb);
}

static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	if (rcu_access_pointer(q->filter_list)) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;

other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	while (q->head != q->tail) {
		struct sk_buff *skb = q->tab[q->head];

		q->head = (q->head + 1) & q->tab_mask;
		if (!skb)
			continue;
		qdisc_qstats_backlog_dec(sch, skb);
		--sch->q.qlen;
		qdisc_drop(skb, sch);
	}

	memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};

static void choke_free(void *addr)
{
	kvfree(addr);
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

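	/*
	 * Example (illustrative): limit = 1000 packets gives
	 * roundup_pow_of_two(1001) = 1024 table slots, tab_mask = 1023.
	 */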
	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
			       GFP_KERNEL | __GFP_NOWARN);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;
			unsigned int dropped = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				dropped += qdisc_pkt_len(skb);
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				qdisc_drop(skb, sch);
			}
			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
	return choke_change(sch, opt);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		= "choke",
	.priv_size	= sizeof(struct choke_sched_data),

	.enqueue	= choke_enqueue,
	.dequeue	= choke_dequeue,
	.peek		= choke_peek_head,
	.init		= choke_init,
	.destroy	= choke_destroy,
	.reset		= choke_reset,
	.change		= choke_change,
	.dump		= choke_dump,
	.dump_stats	= choke_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");