/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *              Stef Coene <stef.coene@docum.org>
 *		HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *		found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *		helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *		code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *		created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *		spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *		fixed requeue routine
 *		and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* HTB algorithm.
 Author: devik@cdi.cz
 ========================================================================
 HTB is like TBF with multiple classes. It is also similar to CBQ because
 it allows a priority to be assigned to each class in the hierarchy.
 In fact it is another implementation of Floyd's formal sharing.

 Levels:
 Each class is assigned a level. A leaf is ALWAYS at level 0 and root
 classes are at level TC_HTB_MAXDEPTH-1. Interior nodes have a level
 one less than their parent.
*/
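
/* Illustration of the level rule (hypothetical classids, not taken from a
 * real setup): with TC_HTB_MAXDEPTH == 8, the hierarchy
 *
 *	1:1   root,  level 7
 *	 `-- 1:10   inner, level 6 (one less than its parent)
 *	      |-- 1:101  leaf, level 0
 *	      `-- 1:102  leaf, level 0
 *
 * has only leaves at level 0; interior levels count down from the root.
 */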

#define HTB_HSIZE 16		/* classid hash size */
#define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* used internally to keep status of single class */
enum htb_cmode {
        HTB_CANT_SEND,		/* class can't send and can't borrow */
        HTB_MAY_BORROW,		/* class can't send but may borrow */
        HTB_CAN_SEND		/* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
        /* general class parameters */
        u32 classid;
        struct gnet_stats_basic bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
        struct tc_htb_xstats xstats;	/* our special stats */
        int refcnt;		/* usage count of this class */

        /* topology */
        int level;		/* our level (see above) */
        struct htb_class *parent;	/* parent class */
        struct hlist_node hlist;	/* classid hash list item */
        struct list_head sibling;	/* sibling list item */
        struct list_head children;	/* children list */

        union {
                struct htb_class_leaf {
                        struct Qdisc *q;
                        int prio;
                        int aprio;
                        int quantum;
                        int deficit[TC_HTB_MAXDEPTH];
                        struct list_head drop_list;
                } leaf;
                struct htb_class_inner {
                        struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
                        struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
                        /* When class changes from state 1->2 and disconnects from
                           parent's feed then we lose the ptr value and start from the
                           first child again. Here we store classid of the
                           last valid ptr (used when ptr is NULL). */
                        u32 last_ptr_id[TC_HTB_NUMPRIO];
                } inner;
        } un;
        struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
        struct rb_node pq_node;	/* node for event queue */
        psched_time_t pq_key;

        int prio_activity;	/* for which prios are we active */
        enum htb_cmode cmode;	/* current mode of the class */

        /* class attached filters */
        struct tcf_proto *filter_list;
        int filter_cnt;

        int warned;		/* only one warning about non work conserving .. */

        /* token bucket parameters */
        struct qdisc_rate_table *rate;	/* rate table of the class itself */
        struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
        long buffer, cbuffer;	/* token bucket depth/rate */
        psched_tdiff_t mbuffer;	/* max wait time */
        long tokens, ctokens;	/* current number of tokens */
        psched_time_t t_c;	/* checkpoint time */

        int prio;		/* For parent to leaf return possible here */
        int quantum;		/* we do backup. Finally full replacement */
                                /* of un.leaf originals should be done. */
};

static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
                       int size)
{
        int slot = size >> rate->rate.cell_log;
        if (slot > 255)
                return (rate->data[255]*(slot >> 8) + rate->data[slot & 0xFF]);
        return rate->data[slot];
}
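
/* Worked example of L2T() (illustrative numbers): with cell_log == 3 a
 * 1000 byte packet maps to slot 1000 >> 3 == 125 and costs
 * rate->data[125] ticks. A 4096 byte packet gives slot 512; as 512 > 255
 * the cost is extrapolated from the top of the table:
 * rate->data[255] * (512 >> 8) + rate->data[512 & 0xFF]
 * == rate->data[255] * 2 + rate->data[0].
 */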

struct htb_sched {
        struct list_head root;	/* root classes list */
        struct hlist_head hash[HTB_HSIZE];	/* hashed by classid */
        struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */

        /* self list - roots of self generating tree */
        struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
        int row_mask[TC_HTB_MAXDEPTH];
        struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
        u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

        /* self wait list - roots of wait PQs per row */
        struct rb_root wait_pq[TC_HTB_MAXDEPTH];

        /* time of nearest event per level (row) */
        psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

        /* whether we hit non-work conserving class during this dequeue; we use */
        int nwc_hit;		/* this to disable mindelay complaint in dequeue */

        int defcls;		/* class where unclassified flows go to */

        /* filters for qdisc itself */
        struct tcf_proto *filter_list;
        int filter_cnt;

        int rate2quantum;	/* quant = rate / rate2quantum */
        psched_time_t now;	/* cached dequeue time */
        struct qdisc_watchdog watchdog;

        /* non shaped skbs; let them go directly thru */
        struct sk_buff_head direct_queue;
        int direct_qlen;	/* max qlen of above */

        long direct_pkts;
};

/* compute hash of size HTB_HSIZE for given handle */
static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
#error "Declare new hash for your HTB_HSIZE"
#endif
        h ^= h >> 8;		/* stolen from cbq_hash */
        h ^= h >> 4;
        return h & 0xf;
}
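
/* Worked example (hypothetical handle): classid 1:5 is handle 0x00010005;
 * folding gives 0x00010005 ^ 0x00000100 == 0x00010105, then
 * 0x00010105 ^ 0x00001010 == 0x00011115, and the final mask with 0xf
 * selects bucket 5.
 */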

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct hlist_node *p;
        struct htb_class *cl;

        if (TC_H_MAJ(handle) != sch->handle)
                return NULL;

        hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
                if (cl->classid == handle)
                        return cl;
        }
        return NULL;
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If that is still
 * unsuccessful we finish and return the direct queue.
 */
#define HTB_DIRECT (struct htb_class *)-1
static inline u32 htb_classid(struct htb_class *cl)
{
        return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        /* allow to select class by setting skb->priority to valid classid;
           note that nfmark can be used too by attaching filter fw with no
           rules in it */
        if (skb->priority == sch->handle)
                return HTB_DIRECT;	/* X:0 (direct flow) selected */
        if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
                return cl;

        *qerr = NET_XMIT_BYPASS;
        tcf = q->filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS;
                case TC_ACT_SHOT:
                        return NULL;
                }
#elif defined(CONFIG_NET_CLS_POLICE)
                if (result == TC_POLICE_SHOT)
                        return HTB_DIRECT;
#endif
                if ((cl = (void *)res.class) == NULL) {
                        if (res.classid == sch->handle)
                                return HTB_DIRECT;	/* X:0 (direct flow) */
                        if ((cl = htb_find(res.classid, sch)) == NULL)
                                break;	/* filter selected invalid classid */
                }
                if (!cl->level)
                        return cl;	/* we hit leaf; return it */

                /* we have got inner class; apply inner filter chain */
                tcf = cl->filter_list;
        }
        /* classification failed; try to use default class */
        cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
        if (!cl || cl->level)
                return HTB_DIRECT;	/* bad default .. this is safe bet */
        return cl;
}
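
/* Precedence illustration (hypothetical handles): on an HTB qdisc 1:0
 * created with default class 20, an skb with skb->priority == 0x00010000
 * (1:0) goes straight to the direct queue; skb->priority == 0x00010010
 * naming leaf 1:10 selects that leaf without any filter lookup; anything
 * else walks the attached filters and, failing those, falls back to
 * leaf 1:20 (q->defcls).
 */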

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
                               struct htb_class *cl, int prio)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
                struct htb_class *c;
                parent = *p;
                c = rb_entry(parent, struct htb_class, node[prio]);

                if (cl->classid > c->classid)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->node[prio], parent, p);
        rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
                                 struct htb_class *cl, long delay)
{
        struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

        cl->pq_key = q->now + delay;
        if (cl->pq_key == q->now)
                cl->pq_key++;

        /* update the nearest event cache */
        if (q->near_ev_cache[cl->level] > cl->pq_key)
                q->near_ev_cache[cl->level] = cl->pq_key;

        while (*p) {
                struct htb_class *c;
                parent = *p;
                c = rb_entry(parent, struct htb_class, pq_node);
                if (cl->pq_key >= c->pq_key)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->pq_node, parent, p);
        rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
        *n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
                                        struct htb_class *cl, int mask)
{
        q->row_mask[cl->level] |= mask;
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
        }
}

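/* Mask walk example: for mask == 0x5 (prios 0 and 2 marked), ffz(~mask)
 * yields 0 on the first pass; clearing bit 0 leaves 0x4, so the second
 * pass yields 2 and the loop terminates with mask == 0.
 */
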
/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
        if (RB_EMPTY_NODE(rb)) {
                WARN_ON(1);
        } else {
                rb_erase(rb, root);
                RB_CLEAR_NODE(rb);
        }
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
                                             struct htb_class *cl, int mask)
{
        int m = 0;

        while (mask) {
                int prio = ffz(~mask);

                mask &= ~(1 << prio);
                if (q->ptr[cl->level][prio] == cl->node + prio)
                        htb_next_rb_node(q->ptr[cl->level] + prio);

                htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
                if (!q->row[cl->level][prio].rb_node)
                        m |= 1 << prio;
        }
        q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m, mask = cl->prio_activity;

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.feed[prio].rb_node)
                                /* parent already has its feed in use, so
                                   reset the bit in mask as parent is
                                   already ok */
                                mask &= ~(1 << prio);

                        htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
                }
                p->prio_activity |= mask;
                cl = p;
                p = cl->parent;

        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m, mask = cl->prio_activity;

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask;
                mask = 0;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.ptr[prio] == cl->node + prio) {
                                /* we are removing child which is pointed to from
                                   parent feed - forget the pointer but remember
                                   classid */
                                p->un.inner.last_ptr_id[prio] = cl->classid;
                                p->un.inner.ptr[prio] = NULL;
                        }

                        htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

                        if (!p->un.inner.feed[prio].rb_node)
                                mask |= 1 << prio;
                }

                p->prio_activity &= ~mask;
                cl = p;
                p = cl->parent;

        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_remove_class_from_row(q, cl, mask);
}

#if HTB_HYSTERESIS
static inline long htb_lowater(const struct htb_class *cl)
{
        return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
        return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
}
#else
#define htb_lowater(cl) (0)
#define htb_hiwater(cl) (0)
#endif

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
        long toks;

        if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
                *diff = -toks;
                return HTB_CANT_SEND;
        }

        if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
                return HTB_CAN_SEND;

        *diff = -toks;
        return HTB_MAY_BORROW;
}
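
/* Hysteresis illustration (made-up numbers): with HTB_HYSTERESIS == 1, a
 * class currently in HTB_CAN_SEND with buffer == 10000 stays in that mode
 * while tokens + diff >= -10000 (htb_hiwater()) instead of dropping out
 * the moment the tokens cross 0; once it has left HTB_CAN_SEND it must
 * climb back to tokens + diff >= 0 before it may send again. This
 * asymmetry is what limits mode flapping.
 */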

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way how to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
        enum htb_cmode new_mode = htb_class_mode(cl, diff);

        if (new_mode == cl->cmode)
                return;

        if (cl->prio_activity) {	/* not necessary: speed optimization */
                if (cl->cmode != HTB_CANT_SEND)
                        htb_deactivate_prios(q, cl);
                cl->cmode = new_mode;
                if (new_mode != HTB_CANT_SEND)
                        htb_activate_prios(q, cl);
        } else
                cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
        BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

        if (!cl->prio_activity) {
                cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
                htb_activate_prios(q, cl);
                list_add_tail(&cl->un.leaf.drop_list,
                              q->drops + cl->un.leaf.aprio);
        }
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
        BUG_TRAP(cl->prio_activity);

        htb_deactivate_prios(q, cl);
        cl->prio_activity = 0;
        list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        int ret;
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = htb_classify(skb, sch, &ret);

        if (cl == HTB_DIRECT) {
                /* enqueue to helper queue */
                if (q->direct_queue.qlen < q->direct_qlen) {
                        __skb_queue_tail(&q->direct_queue, skb);
                        q->direct_pkts++;
                } else {
                        kfree_skb(skb);
                        sch->qstats.drops++;
                        return NET_XMIT_DROP;
                }
#ifdef CONFIG_NET_CLS_ACT
        } else if (!cl) {
                if (ret == NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
#endif
        } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
                   NET_XMIT_SUCCESS) {
                sch->qstats.drops++;
                cl->qstats.drops++;
                return NET_XMIT_DROP;
        } else {
                cl->bstats.packets +=
                        skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
                cl->bstats.bytes += skb->len;
                htb_activate(q, cl);
        }

        sch->q.qlen++;
        sch->bstats.packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
        sch->bstats.bytes += skb->len;
        return NET_XMIT_SUCCESS;
}

/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        int ret = NET_XMIT_SUCCESS;
        struct htb_class *cl = htb_classify(skb, sch, &ret);
        struct sk_buff *tskb;

        if (cl == HTB_DIRECT || !cl) {
                /* enqueue to helper queue */
                if (q->direct_queue.qlen < q->direct_qlen && cl) {
                        __skb_queue_head(&q->direct_queue, skb);
                } else {
                        __skb_queue_head(&q->direct_queue, skb);
                        tskb = __skb_dequeue_tail(&q->direct_queue);
                        kfree_skb(tskb);
                        sch->qstats.drops++;
                        return NET_XMIT_CN;
                }
        } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
                   NET_XMIT_SUCCESS) {
                sch->qstats.drops++;
                cl->qstats.drops++;
                return NET_XMIT_DROP;
        } else
                htb_activate(q, cl);

        sch->q.qlen++;
        sch->qstats.requeues++;
        return NET_XMIT_SUCCESS;
}

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
                             int level, struct sk_buff *skb)
{
        int bytes = skb->len;
        long toks, diff;
        enum htb_cmode old_mode;

#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
        if (toks > cl->B) toks = cl->B; \
        toks -= L2T(cl, cl->R, bytes); \
        if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
        cl->T = toks

        while (cl) {
                diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
                if (cl->level >= level) {
                        if (cl->level == level)
                                cl->xstats.lends++;
                        HTB_ACCNT(tokens, buffer, rate);
                } else {
                        cl->xstats.borrows++;
                        cl->tokens += diff;	/* we moved t_c; update tokens */
                }
                HTB_ACCNT(ctokens, cbuffer, ceil);
                cl->t_c = q->now;

                old_mode = cl->cmode;
                diff = 0;
                htb_change_class_mode(q, cl, &diff);
                if (old_mode != cl->cmode) {
                        if (old_mode != HTB_CAN_SEND)
                                htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
                        if (cl->cmode != HTB_CAN_SEND)
                                htb_add_to_wait_tree(q, cl, diff);
                }

                /* update byte stats except for leaves which are already updated */
                if (cl->level) {
                        cl->bstats.bytes += bytes;
                        cl->bstats.packets += skb_is_gso(skb) ?
                                        skb_shinfo(skb)->gso_segs : 1;
                }
                cl = cl->parent;
        }
}
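
/* HTB_ACCNT walkthrough (illustrative values): with diff == 800,
 * cl->tokens == 500 and cl->buffer == 1000, toks becomes 1300 and is
 * clamped to the bucket depth 1000; subtracting a packet cost
 * L2T(cl, cl->rate, bytes) of, say, 300 leaves cl->tokens == 700.
 * The -cl->mbuffer clamp bounds how far a class may go into debt.
 */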

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq).
 * Note: Applied are events whose cl->pq_key <= q->now.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level)
{
        int i;

        for (i = 0; i < 500; i++) {
                struct htb_class *cl;
                long diff;
                struct rb_node *p = rb_first(&q->wait_pq[level]);

                if (!p)
                        return 0;

                cl = rb_entry(p, struct htb_class, pq_node);
                if (cl->pq_key > q->now)
                        return cl->pq_key;

                htb_safe_rb_erase(p, q->wait_pq + level);
                diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
                htb_change_class_mode(q, cl, &diff);
                if (cl->cmode != HTB_CAN_SEND)
                        htb_add_to_wait_tree(q, cl, diff);
        }
        if (net_ratelimit())
                printk(KERN_WARNING "htb: too many events !\n");
        return q->now + PSCHED_TICKS_PER_SEC / 10;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
   if no such one exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
                                              u32 id)
{
        struct rb_node *r = NULL;
        while (n) {
                struct htb_class *cl =
                        rb_entry(n, struct htb_class, node[prio]);
                if (id == cl->classid)
                        return n;

                if (id > cl->classid) {
                        n = n->rb_right;
                } else {
                        r = n;
                        n = n->rb_left;
                }
        }
        return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find leaf where current feed pointers point to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
                                         struct rb_node **pptr, u32 *pid)
{
        int i;
        struct {
                struct rb_node *root;
                struct rb_node **pptr;
                u32 *pid;
        } stk[TC_HTB_MAXDEPTH], *sp = stk;

        BUG_TRAP(tree->rb_node);
        sp->root = tree->rb_node;
        sp->pptr = pptr;
        sp->pid = pid;

        for (i = 0; i < 65535; i++) {
                if (!*sp->pptr && *sp->pid) {
                        /* ptr was invalidated but id is valid - try to recover
                           the original or next ptr */
                        *sp->pptr =
                            htb_id_find_next_upper(prio, sp->root, *sp->pid);
                }
                *sp->pid = 0;	/* ptr is valid now, so remove this hint as it
                                   can become out of date quickly */
                if (!*sp->pptr) {	/* we are at right end; rewind & go up */
                        *sp->pptr = sp->root;
                        while ((*sp->pptr)->rb_left)
                                *sp->pptr = (*sp->pptr)->rb_left;
                        if (sp > stk) {
                                sp--;
                                BUG_TRAP(*sp->pptr);
                                if (!*sp->pptr)
                                        return NULL;
                                htb_next_rb_node(sp->pptr);
                        }
                } else {
                        struct htb_class *cl;
                        cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
                        if (!cl->level)
                                return cl;
                        (++sp)->root = cl->un.inner.feed[prio].rb_node;
                        sp->pptr = cl->un.inner.ptr + prio;
                        sp->pid = cl->un.inner.last_ptr_id + prio;
                }
        }
        BUG_TRAP(0);
        return NULL;
}

/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
                                        int level)
{
        struct sk_buff *skb = NULL;
        struct htb_class *cl, *start;
        /* look initial class up in the row */
        start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
                                     q->ptr[level] + prio,
                                     q->last_ptr_id[level] + prio);

        do {
next:
                BUG_TRAP(cl);
                if (!cl)
                        return NULL;

                /* class can be empty - it is unlikely but can be true if leaf
                   qdisc drops packets in enqueue routine or if someone used
                   graft operation on the leaf since last dequeue;
                   simply deactivate and skip such class */
                if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                        struct htb_class *next;
                        htb_deactivate(q, cl);

                        /* row/level might become empty */
                        if ((q->row_mask[level] & (1 << prio)) == 0)
                                return NULL;

                        next = htb_lookup_leaf(q->row[level] + prio,
                                               prio, q->ptr[level] + prio,
                                               q->last_ptr_id[level] + prio);

                        if (cl == start)	/* fix start if we just deleted it */
                                start = next;
                        cl = next;
                        goto next;
                }

                skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
                if (likely(skb != NULL))
                        break;
                if (!cl->warned) {
                        printk(KERN_WARNING
                               "htb: class %X isn't work conserving ?!\n",
                               cl->classid);
                        cl->warned = 1;
                }
                q->nwc_hit++;
                htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
                                  ptr[0]) + prio);
                cl = htb_lookup_leaf(q->row[level] + prio, prio,
                                     q->ptr[level] + prio,
                                     q->last_ptr_id[level] + prio);

        } while (cl != start);

        if (likely(skb != NULL)) {
                if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
                        cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
                        htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
                                          ptr[0]) + prio);
                }
                /* this used to be after charge_class but this constellation
                   gives us slightly better performance */
                if (!cl->un.leaf.q->q.qlen)
                        htb_deactivate(q, cl);
                htb_charge_class(q, cl, level, skb);
        }
        return skb;
}
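
/* DRR accounting example (illustrative values): a leaf with quantum 1500
 * and deficit[level] == 200 that dequeues a 600 byte skb ends at -400;
 * that is < 0, so one quantum is added back (deficit becomes 1100) and
 * the round robin pointer advances to the next leaf at this prio.
 */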

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = NULL;
        struct htb_sched *q = qdisc_priv(sch);
        int level;
        psched_time_t next_event;

        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
        skb = __skb_dequeue(&q->direct_queue);
        if (skb != NULL) {
                sch->flags &= ~TCQ_F_THROTTLED;
                sch->q.qlen--;
                return skb;
        }

        if (!sch->q.qlen)
                goto fin;
        q->now = psched_get_time();

        next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
        q->nwc_hit = 0;
        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
                psched_time_t event;

                if (q->now >= q->near_ev_cache[level]) {
                        event = htb_do_events(q, level);
                        if (!event)
                                event = q->now + PSCHED_TICKS_PER_SEC;
                        q->near_ev_cache[level] = event;
                } else
                        event = q->near_ev_cache[level];

                if (event && next_event > event)
                        next_event = event;

                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz(m);
                        m |= 1 << prio;
                        skb = htb_dequeue_tree(q, prio, level);
                        if (likely(skb != NULL)) {
                                sch->q.qlen--;
                                sch->flags &= ~TCQ_F_THROTTLED;
                                goto fin;
                        }
                }
        }
        sch->qstats.overlimits++;
        qdisc_watchdog_schedule(&q->watchdog, next_event);
fin:
        return skb;
}
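
/* Scan order (worked mask example): with row_mask[level] == 0x3, m starts
 * as ~0x3, so ffz(m) returns 0 and then 1: prio 0 is always tried before
 * prio 1 within a level, and the outer loop visits level 0 (leaves)
 * before higher levels, giving classes first claim on their own rate
 * before any borrowing.
 */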

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        int prio;

        for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
                struct list_head *p;
                list_for_each(p, q->drops + prio) {
                        struct htb_class *cl = list_entry(p, struct htb_class,
                                                          un.leaf.drop_list);
                        unsigned int len;
                        if (cl->un.leaf.q->ops->drop &&
                            (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
                                sch->q.qlen--;
                                if (!cl->un.leaf.q->q.qlen)
                                        htb_deactivate(q, cl);
                                return len;
                        }
                }
        }
        return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        int i;

        for (i = 0; i < HTB_HSIZE; i++) {
                struct hlist_node *p;
                struct htb_class *cl;

                hlist_for_each_entry(cl, p, q->hash + i, hlist) {
                        if (cl->level)
                                memset(&cl->un.inner, 0, sizeof(cl->un.inner));
                        else {
                                if (cl->un.leaf.q)
                                        qdisc_reset(cl->un.leaf.q);
                                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                        }
                        cl->prio_activity = 0;
                        cl->cmode = HTB_CAN_SEND;

                }
        }
        qdisc_watchdog_cancel(&q->watchdog);
        __skb_queue_purge(&q->direct_queue);
        sch->q.qlen = 0;
        memset(q->row, 0, sizeof(q->row));
        memset(q->row_mask, 0, sizeof(q->row_mask));
        memset(q->wait_pq, 0, sizeof(q->wait_pq));
        memset(q->ptr, 0, sizeof(q->ptr));
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops + i);
}

static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct rtattr *tb[TCA_HTB_INIT];
        struct tc_htb_glob *gopt;
        int i;
        if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
            tb[TCA_HTB_INIT - 1] == NULL ||
            RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
                printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
                return -EINVAL;
        }
        gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
        if (gopt->version != HTB_VER >> 16) {
                printk(KERN_ERR
                       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
                       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
                return -EINVAL;
        }

        INIT_LIST_HEAD(&q->root);
        for (i = 0; i < HTB_HSIZE; i++)
                INIT_HLIST_HEAD(q->hash + i);
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops + i);

        qdisc_watchdog_init(&q->watchdog, sch);
        skb_queue_head_init(&q->direct_queue);

        q->direct_qlen = sch->dev->tx_queue_len;
        if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
                q->direct_qlen = 2;

        if ((q->rate2quantum = gopt->rate2quantum) < 1)
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;

        return 0;
}
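
/* Userspace view of this init path (illustrative tc commands, not part of
 * this file):
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1: classid 1:1 htb rate 1mbit
 *
 * "default 20" arrives here as gopt->defcls and steers unclassified
 * traffic toward leaf 1:20; per-class rate/ceil parameters are not part
 * of this netlink attribute and are handled by the class change path.
 */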

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct htb_sched *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
        struct tc_htb_glob gopt;
        spin_lock_bh(&sch->dev->queue_lock);
        gopt.direct_pkts = q->direct_pkts;

        gopt.version = HTB_VER;
        gopt.rate2quantum = q->rate2quantum;
        gopt.defcls = q->defcls;
        gopt.debug = 0;
        rta = (struct rtattr *)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
        rta->rta_len = skb_tail_pointer(skb) - b;
        spin_unlock_bh(&sch->dev->queue_lock);
        return skb->len;
rtattr_failure:
        spin_unlock_bh(&sch->dev->queue_lock);
        nlmsg_trim(skb, skb_tail_pointer(skb));
        return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct htb_class *cl = (struct htb_class *)arg;
        unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
        struct tc_htb_opt opt;

        spin_lock_bh(&sch->dev->queue_lock);
        tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
        tcm->tcm_handle = cl->classid;
        if (!cl->level && cl->un.leaf.q)
                tcm->tcm_info = cl->un.leaf.q->handle;

        rta = (struct rtattr *)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

        memset(&opt, 0, sizeof(opt));

        opt.rate = cl->rate->rate;
        opt.buffer = cl->buffer;
        opt.ceil = cl->ceil->rate;
        opt.cbuffer = cl->cbuffer;
        opt.quantum = cl->un.leaf.quantum;
        opt.prio = cl->un.leaf.prio;
        opt.level = cl->level;
        RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
        rta->rta_len = skb_tail_pointer(skb) - b;
        spin_unlock_bh(&sch->dev->queue_lock);
        return skb->len;
rtattr_failure:
        spin_unlock_bh(&sch->dev->queue_lock);
        nlmsg_trim(skb, b);
        return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (!cl->level && cl->un.leaf.q)
                cl->qstats.qlen = cl->un.leaf.q->q.qlen;
        cl->xstats.tokens = cl->tokens;
        cl->xstats.ctokens = cl->ctokens;

        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
                return -1;

        return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (cl && !cl->level) {
                if (new == NULL &&
                    (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                             cl->classid))
                    == NULL)
                        return -ENOBUFS;
                sch_tree_lock(sch);
                if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
                        qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
                        qdisc_reset(*old);
                }
                sch_tree_unlock(sch);
                return 0;
        }
        return -ENOENT;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct htb_class *cl = (struct htb_class *)arg;
        return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (cl->un.leaf.q->q.qlen == 0)
                htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
        struct htb_class *cl = htb_find(classid, sch);
        if (cl)
                cl->refcnt++;
        return (unsigned long)cl;
}
1166
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001167static inline int htb_parent_last_child(struct htb_class *cl)
1168{
1169 if (!cl->parent)
1170 /* the root class */
1171 return 0;
1172
1173 if (!(cl->parent->children.next == &cl->sibling &&
1174 cl->parent->children.prev == &cl->sibling))
1175 /* not the last child */
1176 return 0;
1177
1178 return 1;
1179}
1180
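/*
 * When the last child of an inner class is deleted, the parent reverts to
 * being a leaf: it gets a fresh leaf qdisc, its token buckets are refilled
 * to their configured bursts, and it is marked able to send again.
 */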
static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->un.leaf.quantum = parent->quantum;
	parent->un.leaf.prio = parent->prio;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = psched_get_time();
	parent->cmode = HTB_CAN_SEND;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	struct htb_sched *q = qdisc_priv(sch);

	if (!cl->level) {
		BUG_TRAP(cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	tcf_destroy_chain(cl->filter_list);

	while (!list_empty(&cl->children))
		htb_destroy_class(sch, list_entry(cl->children.next,
						  struct htb_class, sibling));

	/* note: this delete may happen twice (see htb_delete) */
	hlist_del_init(&cl->hlist);
	list_del(&cl->sibling);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	kfree(cl);
}

/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after the htb_destroy_class call below,
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to
	   call unbind_filter on it (without an Oops). */
	tcf_destroy_chain(q->filter_list);

	while (!list_empty(&q->root))
		htb_destroy_class(sch, list_entry(q->root.next,
						  struct htb_class, sibling));

	__skb_queue_purge(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned int qlen;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	// TODO: why don't we allow deleting a whole subtree? references?
	// does the tc subsystem guarantee that in htb_destroy it holds no
	// class refs, so that we could remove children safely there?
	if (!list_empty(&cl->children) || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					  cl->parent->classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		qlen = cl->un.leaf.q->q.qlen;
		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
	}

	/* delete from hash and active; remainder in destroy_class */
	hlist_del_init(&cl->hlist);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (last_child)
		htb_parent_to_leaf(cl, new_q);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}
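
/*
 * From userspace, class deletion corresponds to (classid illustrative):
 *	tc class del dev eth0 classid 1:10
 * It fails with EBUSY while the class still has children or bound filters.
 */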

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}

static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct rtattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct rtattr *opt = tca[TCA_OPTIONS - 1];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct rtattr *tb[TCA_HTB_RTAB];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
	    tb[TCA_HTB_PARMS - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
	if (!rtab || !ctab)
		goto failure;

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct rtattr rta;
			struct gnet_estimator opt;
		} est = {
			.rta = {
				.rta_len = RTA_LENGTH(sizeof(est.opt)),
				.rta_type = TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval = 2,
				.ewma_log = 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle)
		    || htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev->queue_lock,
				  tca[TCA_RATE-1] ? : &est.rta);
		cl->refcnt = 1;
		INIT_LIST_HEAD(&cl->sibling);
		INIT_HLIST_NODE(&cl->hlist);
		INIT_LIST_HEAD(&cl->children);
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create the leaf qdisc early because it uses
		   kmalloc(GFP_KERNEL), which can't be used while holding
		   sch_tree_lock -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* the leaf (we) needs an elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
		cl->t_c = psched_get_time();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
		list_add_tail(&cl->sibling,
			      parent ? &parent->children : &q->root);
	} else {
		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      &sch->dev->queue_lock,
					      tca[TCA_RATE-1]);
		sch_tree_lock(sch);
	}

	/* there used to be a nasty bug here: we have to check that the node
	   is really a leaf before changing cl->un.leaf! */
	if (!cl->level) {
		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 1000;
		}
		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 200000;
		}
		if (hopt->quantum)
			cl->un.leaf.quantum = hopt->quantum;
		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;

		/* backup for htb_parent_to_leaf */
		cl->quantum = cl->un.leaf.quantum;
		cl->prio = cl->un.leaf.prio;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}
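
/*
 * Worked example for the quantum/r2q clamp above (numbers illustrative):
 * with the default r2q (rate2quantum) of 10 and a class rate of 800 kbit
 * (100000 bytes/s), quantum = 100000 / 10 = 10000 bytes, inside the
 * accepted [1000, 200000] range. Creating such a class from userspace:
 *	tc class add dev eth0 parent 1: classid 1:10 htb rate 800kbit ceil 1mbit
 */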

static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least the tc_index filter uses this just to get
	   the class for other reasons, so we have to allow it.
	   ----
	   19.6.2002 As Werner explained, it is OK - bind_filter is just
	   another way to "lock" the class - unlike "get", this lock can
	   be broken by the class during destroy, IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	else
		q->filter_cnt++;
	return (unsigned long)cl;
}
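
/*
 * Binding happens when a filter's flowid points at a class, e.g. with the
 * u32 classifier (device, handles and match illustrative):
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip dport 80 0xffff flowid 1:10
 */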

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
	else
		q->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static struct Qdisc_class_ops htb_class_ops = {
	.graft = htb_graft,
	.leaf = htb_leaf,
	.qlen_notify = htb_qlen_notify,
	.get = htb_get,
	.put = htb_put,
	.change = htb_change_class,
	.delete = htb_delete,
	.walk = htb_walk,
	.tcf_chain = htb_find_tcf,
	.bind_tcf = htb_bind_filter,
	.unbind_tcf = htb_unbind_filter,
	.dump = htb_dump_class,
	.dump_stats = htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops = {
	.next = NULL,
	.cl_ops = &htb_class_ops,
	.id = "htb",
	.priv_size = sizeof(struct htb_sched),
	.enqueue = htb_enqueue,
	.dequeue = htb_dequeue,
	.requeue = htb_requeue,
	.drop = htb_drop,
	.init = htb_init,
	.reset = htb_reset,
	.destroy = htb_destroy,
	.change = NULL /* htb_change */,
	.dump = htb_dump,
	.owner = THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");
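
/*
 * Quick start (assuming the module is built and iproute2 is installed;
 * device and rates illustrative):
 *	modprobe sch_htb
 *	tc qdisc add dev eth0 root handle 1: htb default 10
 *	tc class add dev eth0 parent 1: classid 1:10 htb rate 1mbit ceil 2mbit
 */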