blob: 95c5f2cf3fdf790e2e247eec6b9aba24ebb473b3 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * net/sched/sch_gred.c Generic Random Early Detection queue.
3 *
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 * Authors: J Hadi Salim (hadi@cyberus.ca) 1998-2002
11 *
12 * 991129: - Bug fix with grio mode
13 * - a better sing. AvgQ mode with Grio(WRED)
 14 *		- A finer grained VQ dequeue based on suggestion
15 * from Ren Liu
16 * - More error checks
17 *
18 *
19 *
20 * For all the glorious comments look at Alexey's sch_red.c
21 */
22
23#include <linux/config.h>
24#include <linux/module.h>
25#include <asm/uaccess.h>
26#include <asm/system.h>
27#include <linux/bitops.h>
28#include <linux/types.h>
29#include <linux/kernel.h>
30#include <linux/sched.h>
31#include <linux/string.h>
32#include <linux/mm.h>
33#include <linux/socket.h>
34#include <linux/sockios.h>
35#include <linux/in.h>
36#include <linux/errno.h>
37#include <linux/interrupt.h>
38#include <linux/if_ether.h>
39#include <linux/inet.h>
40#include <linux/netdevice.h>
41#include <linux/etherdevice.h>
42#include <linux/notifier.h>
43#include <net/ip.h>
44#include <net/route.h>
45#include <linux/skbuff.h>
46#include <net/sock.h>
47#include <net/pkt_sched.h>
Thomas Graf22b33422005-11-05 21:14:16 +010048#include <net/red.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
/* Compile-time debug switches: flip the #if values to enable/disable.
 * DPRINTK traces control-path events, D2PRINTK traces per-packet events. */
#if 1 /* control */
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif

#if 0 /* data */
#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define D2PRINTK(format,args...)
#endif

/* Default priority assigned to a VQ configured without one (RIO mode). */
#define GRED_DEF_PRIO (MAX_DPs / 2)
63
struct gred_sched_data;
struct gred_sched;

/* State of one virtual queue (VQ), i.e. one drop precedence (DP):
 * a private RED instance plus byte/packet accounting. */
struct gred_sched_data
{
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* drop-precedence index of this VQ */
	u32		bytesin;	/* bytes seen on virtualQ so far*/
	u32		packetsin;	/* packets seen on virtualQ so far*/
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;		/* RED parameters and avg queue state */
	struct red_stats stats;		/* RED drop/mark counters */
};
79
/* Bit numbers in gred_sched.flags selecting the operating mode. */
enum {
	GRED_WRED_MODE = 1,	/* all VQs share one average (WRED) */
	GRED_RIO_MODE,		/* per-prio accumulated averages (RIO) */
};
84
/* Qdisc-private data: the table of virtual queues and global mode. */
struct gred_sched
{
	struct gred_sched_data *tab[MAX_DPs];	/* VQs indexed by DP */
	unsigned long	flags;			/* GRED_*_MODE bits */
	u32 		DPs;			/* number of usable DPs */
	u32 		def;			/* default DP index */
	u8 		initd;			/* nonzero once a VQ was set up */
};
93
/* WRED mode: every VQ tracks the shared, qdisc-wide average queue.
 * These helpers just wrap the GRED_WRED_MODE flag bit; the non-atomic
 * __set/__clear variants are used because updates happen under the
 * qdisc tree lock. */
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}
108
/* RIO mode: enqueue accumulates the averages of all VQs with lower
 * prio into the acting average.  Flag-bit wrappers as for WRED above. */
static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
123
Thomas Grafdea3f622005-11-05 21:14:09 +0100124static inline int gred_wred_mode_check(struct Qdisc *sch)
125{
126 struct gred_sched *table = qdisc_priv(sch);
127 int i;
128
129 /* Really ugly O(n^2) but shouldn't be necessary too frequent. */
130 for (i = 0; i < table->DPs; i++) {
131 struct gred_sched_data *q = table->tab[i];
132 int n;
133
134 if (q == NULL)
135 continue;
136
137 for (n = 0; n < table->DPs; n++)
138 if (table->tab[n] && table->tab[n] != q &&
139 table->tab[n]->prio == q->prio)
140 return 1;
141 }
142
143 return 0;
144}
145
Thomas Graf22b33422005-11-05 21:14:16 +0100146static inline unsigned int gred_backlog(struct gred_sched *table,
147 struct gred_sched_data *q,
148 struct Qdisc *sch)
149{
150 if (gred_wred_mode(table))
151 return sch->qstats.backlog;
152 else
153 return q->backlog;
154}
155
/*
 * Enqueue hook.  Selects the VQ from the low four bits of skb->tc_index,
 * falling back to the default DP when the index is out of range or the
 * slot is empty, then runs the RED algorithm on that VQ.
 *
 * Returns NET_XMIT_SUCCESS on enqueue, NET_XMIT_CN on a RED congestion
 * drop, or the qdisc_drop() result on a hard/tail drop.
 */
static int
gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct gred_sched_data *q=NULL;
	struct gred_sched *t= qdisc_priv(sch);
	unsigned long qavg = 0;
	int i=0;

	/* Not configured yet: accept packets until the device queue
	 * length is reached so early traffic is not lost. */
	if (!t->initd && skb_queue_len(&sch->q) < (sch->dev->tx_queue_len ? : 1)) {
		D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
		goto do_enqueue;
	}


	if ( ((skb->tc_index&0xf) > (t->DPs -1)) || !(q=t->tab[skb->tc_index&0xf])) {
		printk("GRED: setting to default (%d)\n ",t->def);
		if (!(q=t->tab[t->def])) {
			DPRINTK("GRED: setting to default FAILED! dropping!! "
				"(%d)\n ", t->def);
			goto drop;
		}
		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index=(skb->tc_index&0xfffffff0) | t->def;
	}

	D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
		 "general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog,
		 sch->qstats.backlog);
	/* sum up all the qaves of prios <= to ours to get the new qave;
	 * only done in pure RIO mode (skipped in WRED mode), and idling
	 * VQs are excluded from the sum */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		for (i=0;i<t->DPs;i++) {
			if ((!t->tab[i]) || (i==q->DP))
				continue;

			if (t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->parms))
				qavg +=t->tab[i]->parms.qavg;
		}

	}

	q->packetsin++;
	q->bytesin+=skb->len;

	/* WRED mode: import the shared average kept in the default VQ */
	if (gred_wred_mode(t)) {
		qavg = 0;
		q->parms.qavg = t->tab[t->def]->parms.qavg;
		q->parms.qidlestart = t->tab[t->def]->parms.qidlestart;
	}

	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	/* WRED mode: write the updated average back to the default VQ */
	if (gred_wred_mode(t))
		t->tab[t->def]->parms.qavg = q->parms.qavg;

	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			q->stats.forced_drop++;
			goto congestion_drop;
	}

	/* Hard limit check on this VQ's byte backlog */
	if (q->backlog + skb->len <= q->limit) {
		q->backlog += skb->len;
do_enqueue:
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
244
245static int
246gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
247{
248 struct gred_sched_data *q;
249 struct gred_sched *t= qdisc_priv(sch);
250 q= t->tab[(skb->tc_index&0xf)];
251/* error checking here -- probably unnecessary */
Thomas Graf22b33422005-11-05 21:14:16 +0100252
253 if (red_is_idling(&q->parms))
254 red_end_of_idle_period(&q->parms);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256 q->backlog += skb->len;
Thomas Grafedf7a7b2005-11-05 21:14:19 +0100257 return qdisc_requeue(skb, sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258}
259
260static struct sk_buff *
261gred_dequeue(struct Qdisc* sch)
262{
263 struct sk_buff *skb;
264 struct gred_sched_data *q;
265 struct gred_sched *t= qdisc_priv(sch);
266
Thomas Grafedf7a7b2005-11-05 21:14:19 +0100267 skb = qdisc_dequeue_head(sch);
268
Linus Torvalds1da177e2005-04-16 15:20:36 -0700269 if (skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270 q= t->tab[(skb->tc_index&0xf)];
271 if (q) {
272 q->backlog -= skb->len;
Thomas Grafdea3f622005-11-05 21:14:09 +0100273 if (!q->backlog && !gred_wred_mode(t))
Thomas Graf22b33422005-11-05 21:14:16 +0100274 red_start_of_idle_period(&q->parms);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 } else {
276 D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
277 }
278 return skb;
279 }
280
Thomas Grafdea3f622005-11-05 21:14:09 +0100281 if (gred_wred_mode(t)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282 q= t->tab[t->def];
283 if (!q)
284 D2PRINTK("no default VQ set: Results will be "
285 "screwed up\n");
286 else
Thomas Graf22b33422005-11-05 21:14:16 +0100287 red_start_of_idle_period(&q->parms);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288 }
289
290 return NULL;
291}
292
293static unsigned int gred_drop(struct Qdisc* sch)
294{
295 struct sk_buff *skb;
296
297 struct gred_sched_data *q;
298 struct gred_sched *t= qdisc_priv(sch);
299
Thomas Grafedf7a7b2005-11-05 21:14:19 +0100300 skb = qdisc_dequeue_tail(sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700301 if (skb) {
302 unsigned int len = skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303 q= t->tab[(skb->tc_index&0xf)];
304 if (q) {
305 q->backlog -= len;
Thomas Graf22b33422005-11-05 21:14:16 +0100306 q->stats.other++;
Thomas Grafdea3f622005-11-05 21:14:09 +0100307 if (!q->backlog && !gred_wred_mode(t))
Thomas Graf22b33422005-11-05 21:14:16 +0100308 red_start_of_idle_period(&q->parms);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309 } else {
310 D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
311 }
312
Thomas Grafedf7a7b2005-11-05 21:14:19 +0100313 qdisc_drop(skb, sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700314 return len;
315 }
316
317 q=t->tab[t->def];
318 if (!q) {
319 D2PRINTK("no default VQ set: Results might be screwed up\n");
320 return 0;
321 }
322
Thomas Graf22b33422005-11-05 21:14:16 +0100323 red_start_of_idle_period(&q->parms);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324 return 0;
325
326}
327
328static void gred_reset(struct Qdisc* sch)
329{
330 int i;
331 struct gred_sched_data *q;
332 struct gred_sched *t= qdisc_priv(sch);
333
Thomas Grafedf7a7b2005-11-05 21:14:19 +0100334 qdisc_reset_queue(sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335
336 for (i=0;i<t->DPs;i++) {
337 q= t->tab[i];
338 if (!q)
339 continue;
Thomas Graf22b33422005-11-05 21:14:16 +0100340 red_restart(&q->parms);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341 q->backlog = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342 }
343}
344
/* Free one virtual queue; callers own the table slot and clear it. */
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
349
/*
 * Apply a TCA_GRED_DPS attribute: set the number of usable DPs, the
 * default DP and the operating mode, then free any VQs that fell off
 * the end of the (possibly shrunken) table.
 *
 * Returns 0 on success, -EINVAL on a missing/short attribute or
 * out-of-range DP counts.
 */
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		/* WRED engages only when duplicate priorities exist. */
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	/* Destroy VQs beyond the new table size (unreachable per above). */
	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	/* Force reconfiguration of the VQs before traffic is trusted. */
	table->initd = 0;

	return 0;
}
398
/*
 * Create (if necessary) and configure the virtual queue for DP 'dp'
 * with the given RED parameters, priority and stab table.
 *
 * Returns 0 on success or -ENOMEM if a new VQ cannot be allocated.
 * Called with the qdisc tree lock held by gred_change().
 */
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
		memset(table->tab[dp], 0, sizeof(*q));
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	/* An empty VQ starts out of the idle period. */
	if (q->backlog == 0)
		red_end_of_idle_period(&q->parms);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab);

	return 0;
}
426
/*
 * ->change() hook: either reconfigure the DP table (when no per-VQ
 * parameters are given) or configure a single VQ, making sure the
 * default DP always has a VQ and re-evaluating WRED mode afterwards.
 *
 * Returns 0 on success, -EINVAL on malformed attributes, or the error
 * from gred_change_vq().
 */
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];
	int err = -EINVAL, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	/* No per-VQ parameters at all: this is a table reconfiguration. */
	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

	if (ctl->DP >= table->DPs)
		goto errout;

	/* RIO mode requires a priority; inherit the default VQ's when
	 * none was supplied. */
	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	/* The default DP must always have a VQ behind it; clone this
	 * VQ's parameters into it if it is still empty. */
	if (table->tab[table->def] == NULL) {
		if (gred_rio_mode(table))
			prio = table->tab[ctl->DP]->prio;

		err = gred_change_vq(sch, table->def, ctl, prio, stab);
		if (err < 0)
			goto errout_locked;
	}

	table->initd = 1;

	/* A priority change may have created/removed duplicates, so
	 * re-evaluate whether WRED mode should be engaged. */
	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}
498
499static int gred_init(struct Qdisc *sch, struct rtattr *opt)
500{
Thomas Graf66396072005-11-05 21:14:13 +0100501 struct rtattr *tb[TCA_GRED_MAX];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700502
Thomas Graf66396072005-11-05 21:14:13 +0100503 if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700504 return -EINVAL;
505
Thomas Graf66396072005-11-05 21:14:13 +0100506 if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
507 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700508
Thomas Graf66396072005-11-05 21:14:13 +0100509 return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510}
511
/*
 * ->dump() hook: emit the table parameters (TCA_GRED_DPS) plus one
 * tc_gred_qopt record per DP slot.  Empty slots are flagged by setting
 * opt.DP >= MAX_DPs.  The RTA_* macros jump to rtattr_failure when the
 * skb runs out of room, cancelling the nested attribute.
 */
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		/* Thresholds are stored scaled by Wlog; unscale for tc. */
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		/* WRED mode: report the shared average from the default VQ. */
		if (gred_wred_mode(table)) {
			q->parms.qidlestart =
				table->tab[table->def]->parms.qidlestart;
			q->parms.qavg = table->tab[table->def]->parms.qavg;
		}

		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}
577
578static void gred_destroy(struct Qdisc *sch)
579{
580 struct gred_sched *table = qdisc_priv(sch);
581 int i;
582
583 for (i = 0;i < table->DPs; i++) {
584 if (table->tab[i])
Thomas Graf66396072005-11-05 21:14:13 +0100585 gred_destroy_vq(table->tab[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586 }
587}
588
/* Operations table registered with the packet scheduler core;
 * classless (no cl_ops), identified as "gred" to userspace tc. */
static struct Qdisc_ops gred_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	NULL,
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.requeue	=	gred_requeue,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};
605
/* Module entry/exit: register and unregister the "gred" qdisc. */
static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}
static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}
module_init(gred_module_init)
module_exit(gred_module_exit)
MODULE_LICENSE("GPL");