/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 * 991129:     - Bug fix with grio mode
 *	       - A better single AvgQ mode with Grio (WRED)
 *	       - A finer grained VQ dequeue based on a suggestion
 *	         from Ren Liu
 *	       - More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */
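
/*
 * Overview: a single gred qdisc multiplexes up to MAX_DPs independent
 * RED "virtual queues" (VQs). The low bits of skb->tc_index select the
 * VQ (drop parameter set, "DP") for each packet. In rio mode every VQ
 * keeps its own average queue length; in wred mode all VQs share one
 * average (wred_set below).
 *
 * For illustration only -- a rough user space setup with iproute2
 * (exact option names may differ between tc versions):
 *
 *	tc qdisc add dev eth0 root gred setup DPs 3 default 2 grio
 *	tc qdisc change dev eth0 root gred DP 0 prio 2 limit 60KB \
 *		min 15KB max 45KB burst 20 avpkt 1000 bandwidth 10Mbit \
 *		probability 0.02
 */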

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)
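/* Note: the mask trick assumes MAX_DPs is a power of two (16 at the
 * time of writing), so that tc_index & (MAX_DPs - 1) equals
 * tc_index % MAX_DPs. */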

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data
{
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters		*/
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ	*/
	u8		prio;		/* the prio of this vq		*/

	struct red_parms parms;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched
{
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_parms wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

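/*
 * WRED mode is only meaningful if at least two VQs share the same prio:
 * e.g. with tab[0]->prio == tab[1]->prio, the check below returns 1 and
 * the qdisc maintains a single shared average over those queues.
 */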
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't run very often. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

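/*
 * Map a packet to its VQ: the DP lives in the low bits of tc_index.
 * Example (MAX_DPs == 16): tc_index 0x15 & GRED_VQ_MASK (0xf) -> DP 5.
 */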
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

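/*
 * In WRED mode all VQs share one average queue length. Before running
 * RED for a packet we copy the shared state into the VQ's red_parms,
 * and afterwards store the updated average back. Only qavg needs to be
 * written back; qidlestart is maintained on the shared set at dequeue
 * and drop time.
 */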
static inline void gred_load_wred_set(struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->parms.qavg = table->wred_set.qavg;
	q->parms.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->parms.qavg;
}

static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}

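/*
 * Enqueue path: pick the VQ from tc_index (falling back to the default
 * DP), update the average queue length, then act on the RED verdict:
 * RED_DONT_MARK queues the packet; RED_PROB_MARK and RED_HARD_MARK
 * either ECN-mark it (if TC_RED_ECN is set and the packet is ECN
 * capable) or drop it and return NET_XMIT_CN. A packet that survives
 * RED can still be dropped if it would overflow the per-VQ byte limit.
 */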
static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		if ((q = t->tab[dp]) == NULL) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* Fix tc_index? It could be controversial, but it is
		 * needed for requeueing. */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* Sum up the qavgs of all prios below ours to get the new qavg. */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->parms))
				qavg += t->tab[i]->parms.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += skb->len;

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (q->backlog + skb->len <= q->limit) {
		q->backlog += skb->len;
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

static int gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct gred_sched *t = qdisc_priv(sch);
	struct gred_sched_data *q;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		if (net_ratelimit())
			printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
			       "for requeue, screwing up backlog.\n",
			       tc_index_to_dp(skb));
	} else {
		if (red_is_idling(&q->parms))
			red_end_of_idle_period(&q->parms);
		q->backlog += skb->len;
	}

	return qdisc_requeue(skb, sch);
}

static struct sk_buff *gred_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x after dequeue, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= skb->len;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}

static unsigned int gred_drop(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = skb->len;
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x while dropping, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}

static void gred_reset(struct Qdisc* sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->parms);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

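/*
 * Table-level (re)configuration: takes a tc_gred_sopt carrying the
 * number of DPs, the default DP and the grio/flags settings, applies
 * them and destroys any VQs that the new DP count shadows.
 */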
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}

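/*
 * Per-VQ (re)configuration. The RED parameters come straight from
 * tc_gred_qopt: qth_min/qth_max are the queue thresholds as configured
 * from user space, Wlog/Plog/Scell_log the log2-scaled EWMA weight,
 * probability and cell-size parameters, and stab the 256 byte lookup
 * table consumed by red_set_parms() (see include/net/red.h for the
 * exact semantics).
 */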
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->parms);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab);

	return 0;
}

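/*
 * Change request handling, as parsed below: if neither TCA_GRED_PARMS
 * nor TCA_GRED_STAB is present the request is treated as a table-level
 * setup and handed to gred_change_table_def(); otherwise both must be
 * present -- TCA_GRED_PARMS carrying a struct tc_gred_qopt and
 * TCA_GRED_STAB the 256 byte stab table -- to configure a single VQ.
 */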
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];
	int err = -EINVAL, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}

static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}

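/*
 * Dump format: TCA_OPTIONS nests TCA_GRED_DPS (a struct tc_gred_sopt)
 * followed by TCA_GRED_PARMS, which always carries MAX_DPs consecutive
 * struct tc_gred_qopt entries; entries whose DP is >= MAX_DPs mark VQs
 * that are not configured.
 */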
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* Hack -- fix at some point with a proper message.
			 * This is how we indicate to tc that there is no
			 * VQ at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table)) {
			q->parms.qidlestart =
				table->tab[table->def]->parms.qidlestart;
			q->parms.qavg = table->tab[table->def]->parms.qavg;
		}

		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.requeue	=	gred_requeue,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");