/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving
   flows but maintains no flow state. The difference from RED is an
   additional step during enqueue: if the average queue size is over the
   minimum threshold (qmin), a packet is chosen at random from the queue.
   If both the new and the chosen packet belong to the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc
   because it needs to access packets in the queue at random. It has a
   minimal class interface to allow overriding the builtin flow
   classifier with filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe - A Stateless Active
   Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004.
 */
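
/*
 * Example configuration (illustrative only; follows the iproute2
 * tc-choke(8) syntax, the device name and values are hypothetical):
 *
 *	tc qdisc add dev eth0 root choke limit 1000 bandwidth 10mbit ecn
 *
 * As with RED, min/max thresholds and burst may be given explicitly or
 * left for tc to derive from the bandwidth and limit.
 */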

/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct red_vars  vars;
	struct tcf_proto __rcu *filter_list;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;

	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};

/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}
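
/*
 * Worked example (hypothetical values): with tab_mask = 7 (an 8-slot
 * table), head = 6 and tail = 2, choke_len() = (2 - 6) & 7 = 4: slots
 * 6, 7, 0 and 1 are counted, holes included.
 */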

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}
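
/*
 * Illustration (hypothetical state): after dequeue removes the skb at
 * head = 0, and slots 1 and 2 are holes left by earlier matched drops,
 * choke_zap_head_holes() advances head to 3, the next real packet, so
 * q->tab[q->head] refers to a real skb whenever the queue is non-empty.
 */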

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
			      struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
	qdisc_drop(skb, sch, to_free);
	--sch->q.qlen;
}

struct choke_skb_cb {
	u16			classid;
	u8			keys_valid;
	struct flow_keys_digest	keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
	choke_skb_cb(skb)->classid = classid;
}

static u16 choke_get_classid(const struct sk_buff *skb)
{
	return choke_skb_cb(skb)->classid;
}

/*
 * Compare flow of two packets.
 * Returns true only if source and destination addresses and ports match;
 * false for special cases.
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}
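
/*
 * Note: the dissected flow keys are cached in skb->cb as a digest, so a
 * packet that survives several random comparisons is dissected at most
 * once. Two TCP packets sharing the same 5-tuple (protocol, addresses,
 * ports) produce identical digests and therefore compare as one flow.
 */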

/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. use TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
			   struct Qdisc *sch, int *qerr)

{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	fl = rcu_dereference_bh(q->filter_list);
	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return false;
		}
#endif
		choke_set_classid(skb, TC_H_MIN(res.classid));
		return true;
	}

	return false;
}

/*
 * Select a packet at random from queue.
 * HACK: since the queue can have holes from previous deletions, retry
 * several times to find a random skb, then just give up and return the
 * head. Will return NULL if the queue is empty (q->head == q->tail).
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retries = 3;

	do {
		*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retries > 0);

	return q->tab[*pidx = q->head];
}
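
/*
 * Example (hypothetical): with head = 2, tail = 7 and holes at slots 3
 * and 5, choke_len() is 5, so each attempt picks one of slots 2..6; if
 * all three attempts land on holes, the fallback returns the head slot,
 * which always holds a real packet while the queue is non-empty.
 */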

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	if (rcu_access_pointer(q->filter_list))
		return choke_get_classid(nskb) == choke_get_classid(oskb);

	return choke_match_flow(oskb, nskb);
}
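
/*
 * When an external classifier is attached, "same flow" means "same TC
 * class id", so the CHOKe comparison penalizes heavy classes rather
 * than individual 5-tuple flows.
 */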

static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	if (rcu_access_pointer(q->filter_list)) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;

other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	__qdisc_drop(skb, to_free);
	return ret;
}
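
/*
 * Enqueue decision summary (hard queue limit aside):
 *	qavg <= qth_min			admit, reset RED count
 *	flow matches random victim	drop both (the CHOKe step)
 *	qavg > qth_max			forced mark or drop
 *	qth_min < qavg <= qth_max	probabilistic mark or drop (RED)
 */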

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	while (q->head != q->tail) {
		struct sk_buff *skb = q->tab[q->head];

		q->head = (q->head + 1) & q->tab_mask;
		if (!skb)
			continue;
		rtnl_qdisc_drop(skb, sch);
	}

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};


static void choke_free(void *addr)
{
	kvfree(addr);
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
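	/*
	 * Worked example (hypothetical): limit = 1000 gives
	 * roundup_pow_of_two(1001) = 1024, so mask = 1023 and the table
	 * has 1024 slots; sizing it past the limit keeps head == tail
	 * unambiguous as the "empty" condition.
	 */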
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
			       GFP_KERNEL | __GFP_NOWARN);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;
			unsigned int dropped = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				dropped += qdisc_pkt_len(skb);
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				rtnl_qdisc_drop(skb, sch);
			}
			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
	return choke_change(sch, opt);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),

	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");