/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. Leaves ALWAYS have level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/

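/* Illustrative sketch (not part of the original source), following the level
   rules stated above for a hypothetical hierarchy:

	1:1   root          level TC_HTB_MAXDEPTH-1
	+- 1:10  inner      level TC_HTB_MAXDEPTH-2 (one less than its parent)
	|  +- 1:101 leaf    level 0
	+- 1:20  leaf       level 0

   leaves sit at level 0 no matter how deep they are in the tree. */
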
#define HTB_HSIZE 16		/* classid hash size */
#define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* used internally to keep the status of a single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	/* general class parameters */
	u32 classid;
	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

	/* topology */
	int level;		/* our level (see above) */
	struct htb_class *parent;	/* parent class */
	struct hlist_node hlist;	/* classid hash list item */
	struct list_head sibling;	/* sibling list item */
	struct list_head children;	/* children list */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int prio;
			int aprio;
			int quantum;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When a class changes from state 1->2 and disconnects
			   from its parent's feed, the ptr value is lost and we
			   start from the first child again. Here we store the
			   classid of the last valid ptr (used when ptr is NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	psched_time_t pq_key;

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int warned;		/* print only one warning about a non-work-conserving leaf */

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */

	int prio;		/* For parent to leaf return possible here */
	int quantum;		/* we do backup. Finally full replacement */
				/* of un.leaf originals should be done. */
};

static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
		       int size)
{
	int slot = size >> rate->rate.cell_log;
	if (slot > 255)
		return (rate->data[255]*(slot >> 8) + rate->data[slot & 0xFF]);
	return rate->data[slot];
}
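
/* Worked example (illustrative, not from the original source): with
   cell_log = 3, a 1000 byte packet maps to slot 1000 >> 3 = 125 and L2T()
   returns rate->data[125], the precomputed transmission time for a packet
   of that size at the class rate. For sizes whose slot exceeds 255 the
   table is extrapolated, e.g. slot 600 yields
   data[255] * (600 >> 8) + data[600 & 0xFF] = data[255] * 2 + data[88]. */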

struct htb_sched {
	struct list_head root;	/* root classes list */
	struct hlist_head hash[HTB_HSIZE];	/* hashed by classid */
	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

	/* whether we hit non-work conserving class during this dequeue; we use */
	int nwc_hit;		/* this to disable mindelay complaint in dequeue */

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct qdisc_watchdog watchdog;

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;
};

/* compute hash of size HTB_HSIZE for given handle */
static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
#error "Declare new hash for your HTB_HSIZE"
#endif
	h ^= h >> 8;	/* stolen from cbq_hash */
	h ^= h >> 4;
	return h & 0xf;
}
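
/* Worked example (illustrative, not from the original source): for handle
   1:14, i.e. 0x00010014, h ^= h >> 8 gives 0x00010114, h ^= h >> 4 gives
   0x00011105, and h & 0xf selects hash bucket 5. */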

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *p;
	struct htb_class *cl;

	if (TC_H_MAJ(handle) != sch->handle)
		return NULL;

	hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
		if (cl->classid == handle)
			return cl;
	}
	return NULL;
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If that is still
 * unsuccessful we finish and return the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1
static inline u32 htb_classid(struct htb_class *cl)
{
	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	   note that nfmark can be used too by attaching filter fw with no
	   rules in it */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->classid > c->classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}


/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for the priorities it participates in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use so
				   reset the bit in mask as parent is
				   already ok */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing child which is pointed to from
				   parent feed - forget the pointer but remember
				   classid */
				p->un.inner.last_ptr_id[prio] = cl->classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

#if HTB_HYSTERESIS
static inline long htb_lowater(const struct htb_class *cl)
{
	return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
	return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
}
#else
#define htb_lowater(cl)	(0)
#define htb_hiwater(cl)	(0)
#endif

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0; rather there can be hysteresis over the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
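
/* Worked example (illustrative, not from the original source): suppose
   *diff brings cl->ctokens to -5000 while htb_lowater() is 0. Then
   ctokens < lowater, so the class is HTB_CANT_SEND and *diff is set to
   5000, the time-denominated token deficit that the caller passes to
   htb_add_to_wait_tree() as the delay. If instead ctokens stays above
   lowater but cl->tokens + *diff is below hiwater, the class cannot send
   at its own rate but MAY_BORROW from its ancestors. */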

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->un.leaf.aprio);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else {
		cl->bstats.packets +=
			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
		cl->bstats.bytes += skb->len;
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
	sch->bstats.bytes += skb->len;
	return NET_XMIT_SUCCESS;
}

/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int ret = NET_XMIT_SUCCESS;
	struct htb_class *cl = htb_classify(skb, sch, &ret);
	struct sk_buff *tskb;

	if (cl == HTB_DIRECT || !cl) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen && cl) {
			__skb_queue_head(&q->direct_queue, skb);
		} else {
			__skb_queue_head(&q->direct_queue, skb);
			tskb = __skb_dequeue_tail(&q->direct_queue);
			kfree_skb(tskb);
			sch->qstats.drops++;
			return NET_XMIT_CN;
		}
	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else
		htb_activate(q, cl);

	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = skb->len;
	long toks, diff;
	enum htb_cmode old_mode;

#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
	if (toks > cl->B) toks = cl->B; \
	toks -= L2T(cl, cl->R, bytes); \
	if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
	cl->T = toks
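
/* Illustrative reading of HTB_ACCNT (comment added; not from the original
   source): HTB_ACCNT(tokens, buffer, rate) means "top up cl->tokens by the
   elapsed time diff, clamp at the bucket depth cl->buffer, subtract the
   transmission time of this packet at cl->rate, and clamp the deficit just
   above -cl->mbuffer". Tokens are thus kept in time units: positive values
   are accumulated idle time, negative ones measure how far the class has
   overdrawn its rate. */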

	while (cl) {
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			HTB_ACCNT(tokens, buffer, rate);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		HTB_ACCNT(ctokens, cbuffer, ceil);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update byte stats except for leaves which are already updated */
		if (cl->level) {
			cl->bstats.bytes += bytes;
			cl->bstats.packets += skb_is_gso(skb)?
					skb_shinfo(skb)->gso_segs:1;
		}
		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq).
 * Note: Applied are events which have cl->pq_key <= q->now.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level)
{
	int i;

	for (i = 0; i < 500; i++) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}
	if (net_ratelimit())
		printk(KERN_WARNING "htb: too many events !\n");
	return q->now + PSCHED_TICKS_PER_SEC / 10;
}

/* Returns class->node+prio from id-tree where the class's id is >= id. NULL
   if no such one exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);
		if (id == cl->classid)
			return n;

		if (id > cl->classid) {
			n = n->rb_right;
		} else {
			r = n;
			n = n->rb_left;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf where the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 * pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_TRAP(tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			   the original or next ptr */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
				   can become out of date quickly */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				BUG_TRAP(*sp->pptr);
				if (!*sp->pptr)
					return NULL;
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	BUG_TRAP(0);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		BUG_TRAP(cl);
		if (!cl)
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		   qdisc drops packets in enqueue routine or if someone used
		   graft operation on the leaf since last dequeue;
		   simply deactivate and skip such class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;
		if (!cl->warned) {
			printk(KERN_WARNING
			       "htb: class %X isn't work conserving ?!\n",
			       cl->classid);
			cl->warned = 1;
		}
		q->nwc_hit++;
		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
				  ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
					  ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}
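
/* Illustrative note (not from the original source): the deficit test above
   is the DRR step - a leaf keeps the feed pointer, and thus keeps being
   picked, until it has sent roughly quantum bytes at this level; only then
   does the round-robin pointer advance to the next leaf in classid order. */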

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = NULL;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	psched_time_t next_event;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = psched_get_time();

	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
	q->nwc_hit = 0;
	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		psched_time_t event;

		if (q->now >= q->near_ev_cache[level]) {
			event = htb_do_events(q, level);
			if (!event)
				event = q->now + PSCHED_TICKS_PER_SEC;
			q->near_ev_cache[level] = event;
		} else
			event = q->near_ev_cache[level];

		if (event && next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL)) {
				sch->q.qlen--;
				sch->flags &= ~TCQ_F_THROTTLED;
				goto fin;
			}
		}
	}
	sch->qstats.overlimits++;
	qdisc_watchdog_schedule(&q->watchdog, next_event);
fin:
	return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;

		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}

static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_HTB_INIT];
	struct tc_htb_glob *gopt;
	int i;
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
	    tb[TCA_HTB_INIT - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
		return -EINVAL;
	}
	gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR
		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&q->root);
	for (i = 0; i < HTB_HSIZE; i++)
		INIT_HLIST_HEAD(q->hash + i);
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_watchdog_init(&q->watchdog, sch);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = sch->dev->tx_queue_len;
	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
		q->direct_qlen = 2;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}
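
/* Worked example (illustrative, not from the original source): with a
   rate2quantum of 10, a class whose rate is 12500 bytes/s (100 kbit/s)
   gets a DRR quantum of 12500 / 10 = 1250 bytes per round, per the
   "quant = rate / rate2quantum" relation noted in struct htb_sched. */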

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;
	struct tc_htb_glob gopt;
	spin_lock_bh(&sch->dev->queue_lock);
	gopt.direct_pkts = q->direct_pkts;

	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;
	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	rta->rta_len = skb_tail_pointer(skb) - b;
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;
rtattr_failure:
	spin_unlock_bh(&sch->dev->queue_lock);
	nlmsg_trim(skb, skb_tail_pointer(skb));
	return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;
	struct tc_htb_opt opt;

	spin_lock_bh(&sch->dev->queue_lock);
	tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->un.leaf.quantum;
	opt.prio = cl->un.leaf.prio;
	opt.level = cl->level;
	RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb_tail_pointer(skb) - b;
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;
rtattr_failure:
	spin_unlock_bh(&sch->dev->queue_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl && !cl->level) {
		if (new == NULL &&
		    (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					     cl->classid))
		    == NULL)
			return -ENOBUFS;
		sch_tree_lock(sch);
		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
			qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
			qdisc_reset(*old);
		}
		sch_tree_unlock(sch);
		return 0;
	}
	return -ENOENT;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}
1163
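/* Return 1 if cl is its parent's only child, i.e. the parent will become
 * childless once cl is deleted.
 */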
static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;

	if (!(cl->parent->children.next == &cl->sibling &&
	      cl->parent->children.prev == &cl->sibling))
		/* not the last child */
		return 0;

	return 1;
}

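/* Turn a parent whose last child is being deleted back into a leaf:
 * restore the backed-up quantum/prio, attach new_q as its qdisc, and
 * refill both token buckets so it restarts in HTB_CAN_SEND mode.
 */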
static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->un.leaf.quantum = parent->quantum;
	parent->un.leaf.prio = parent->prio;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = psched_get_time();
	parent->cmode = HTB_CAN_SEND;
}

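/* Free a class and everything it owns: leaf qdisc, rate estimator, rate
 * tables, attached filters and (recursively) all children; unlink it from
 * the hash, the sibling list and, if pending, the wait queue.
 */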
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	struct htb_sched *q = qdisc_priv(sch);

	if (!cl->level) {
		BUG_TRAP(cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	tcf_destroy_chain(cl->filter_list);

	while (!list_empty(&cl->children))
		htb_destroy_class(sch, list_entry(cl->children.next,
						  struct htb_class, sibling));

	/* note: this delete may happen twice (see htb_delete) */
	hlist_del_init(&cl->hlist);
	list_del(&cl->sibling);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	kfree(cl);
}

/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after the htb_destroy_class call below,
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to
	   call unbind_filter on it (without an Oops). */
	tcf_destroy_chain(q->filter_list);

	while (!list_empty(&q->root))
		htb_destroy_class(sch, list_entry(q->root.next,
						  struct htb_class, sibling));

	__skb_queue_purge(&q->direct_queue);
}

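/* Delete one class. new_q for a possible parent-to-leaf conversion is
 * preallocated before taking the tree lock because qdisc_create_dflt
 * allocates with GFP_KERNEL and may sleep.
 */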
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned int qlen;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	// TODO: why don't we allow deleting a subtree? references? does the
	// tc subsystem guarantee that in htb_destroy it holds no class
	// refs, so that we can remove children safely there?
	if (!list_empty(&cl->children) || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					  cl->parent->classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		qlen = cl->un.leaf.q->q.qlen;
		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
	}

	/* delete from hash and active; remainder in destroy_class */
	hlist_del_init(&cl->hlist);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (last_child)
		htb_parent_to_leaf(cl, new_q);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}

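/* Create a new class or change an existing one; entered from tc via the
 * ->change class op. Creating a class under a leaf first converts that
 * leaf into an inner node and destroys its private qdisc. Typical usage
 * from userspace (illustrative example, not taken from this file):
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 12
 *   tc class add dev eth0 parent 1: classid 1:12 htb rate 100kbit ceil 200kbit
 */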
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct rtattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct rtattr *opt = tca[TCA_OPTIONS - 1];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct rtattr *tb[TCA_HTB_RTAB];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
	    tb[TCA_HTB_PARMS - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
	if (!rtab || !ctab)
		goto failure;

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct rtattr rta;
			struct gnet_estimator opt;
		} est = {
			.rta = {
				.rta_len = RTA_LENGTH(sizeof(est.opt)),
				.rta_type = TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval = 2,
				.ewma_log = 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle)
		    || htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev->queue_lock,
				  tca[TCA_RATE-1] ? : &est.rta);
		cl->refcnt = 1;
		INIT_LIST_HEAD(&cl->sibling);
		INIT_HLIST_NODE(&cl->hlist);
		INIT_LIST_HEAD(&cl->children);
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create the leaf qdisc early because it uses kmalloc(GFP_KERNEL),
		   which can't be called inside sch_tree_lock
		   -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
		cl->t_c = psched_get_time();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
		list_add_tail(&cl->sibling,
			      parent ? &parent->children : &q->root);
	} else {
		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      &sch->dev->queue_lock,
					      tca[TCA_RATE-1]);
		sch_tree_lock(sch);
	}

	/* there used to be a nasty bug here: we have to check that the node
	   is really a leaf before changing cl->un.leaf! */
	if (!cl->level) {
		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 1000;
		}
		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 200000;
		}
		if (hopt->quantum)
			cl->un.leaf.quantum = hopt->quantum;
		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;

		/* backup for htb_parent_to_leaf */
		cl->quantum = cl->un.leaf.quantum;
		cl->prio = cl->un.leaf.prio;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}

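/* Pick the filter list for a class, falling back to the qdisc-wide list
 * when no class is given.
 */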
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least the tc_index filter uses this just to get the
	   class for other reasons, so we have to allow it.
	   ----
	   19.6.2002 As Werner explained, it is OK - bind_filter is just
	   another way to "lock" the class - unlike "get", this lock can
	   be broken by the class during destroy, IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	else
		q->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
	else
		q->filter_cnt--;
}

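/* Iterate over all classes in the hash table, honouring the skip/count
 * bookkeeping of the generic qdisc walker.
 */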
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static struct Qdisc_class_ops htb_class_ops = {
	.graft = htb_graft,
	.leaf = htb_leaf,
	.qlen_notify = htb_qlen_notify,
	.get = htb_get,
	.put = htb_put,
	.change = htb_change_class,
	.delete = htb_delete,
	.walk = htb_walk,
	.tcf_chain = htb_find_tcf,
	.bind_tcf = htb_bind_filter,
	.unbind_tcf = htb_unbind_filter,
	.dump = htb_dump_class,
	.dump_stats = htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops = {
	.next = NULL,
	.cl_ops = &htb_class_ops,
	.id = "htb",
	.priv_size = sizeof(struct htb_sched),
	.enqueue = htb_enqueue,
	.dequeue = htb_dequeue,
	.requeue = htb_requeue,
	.drop = htb_drop,
	.init = htb_init,
	.reset = htb_reset,
	.destroy = htb_destroy,
	.change = NULL /* htb_change */,
	.dump = htb_dump,
	.owner = THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");