/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rbtree.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. Leaves ALWAYS have level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
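/*
 * Illustrative sketch of the level numbering described above (with
 * TC_HTB_MAXDEPTH of 8, as defined in pkt_sched.h):
 *
 *	root class		level 7
 *	  interior class	level 6 (one below its parent)
 *	    ...
 *	      leaf classes	level 0 (always)
 *
 * Only leaf classes own a queueing discipline and hold packets; interior
 * classes exist to lend rate to and borrow rate from their subtree.
 */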

#define HTB_HSIZE 16		/* classid hash size */
#define HTB_EWMAC 2		/* rate average over HTB_EWMAC*HTB_HSIZE sec */
#define HTB_RATECM 1		/* whether to use rate computer */
#define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	/* general class parameters */
	u32 classid;
	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

#ifdef HTB_RATECM
	/* rate measurement counters */
	unsigned long rate_bytes, sum_bytes;
	unsigned long rate_packets, sum_packets;
#endif

	/* topology */
	int level;		/* our level (see above) */
	struct htb_class *parent;	/* parent class */
	struct hlist_node hlist;	/* classid hash list item */
	struct list_head sibling;	/* sibling list item */
	struct list_head children;	/* children list */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int prio;
			int aprio;
			int quantum;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When class changes from state 1->2 and disconnects from
			   parent's feed then we lose the ptr value and start from the
			   first child again. Here we store classid of the
			   last valid ptr (used when ptr is NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	unsigned long pq_key;	/* the same type as jiffies global */

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int warned;		/* only one warning about non work conserving .. */

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */

	int prio;		/* For parent to leaf return possible here */
	int quantum;		/* we do backup. Finally full replacement */
				/* of un.leaf originals should be done. */
};

/* TODO: maybe compute rate when size is too large .. or drop ? */
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
		       int size)
{
	int slot = size >> rate->rate.cell_log;
	if (slot > 255) {
		cl->xstats.giants++;
		slot = 255;
	}
	return rate->data[slot];
}
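/*
 * L2T ("length to time") looks up how long transmitting "size" bytes takes at
 * the class's configured rate: the rate table filled in by the tc tool holds
 * one transmit time per "cell" of 2^cell_log bytes.  For example, with a
 * cell_log of 3 a 1500-byte packet indexes slot 1500 >> 3 == 187; anything
 * past slot 255 is clamped and counted as a "giant".  (Illustrative numbers
 * only; the actual cell_log is chosen by userspace.)
 */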

struct htb_sched {
	struct list_head root;	/* root classes list */
	struct hlist_head hash[HTB_HSIZE];	/* hashed by classid */
	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	unsigned long near_ev_cache[TC_HTB_MAXDEPTH];

	/* cached value of jiffies in dequeue */
	unsigned long jiffies;

	/* whether we hit non-work conserving class during this dequeue; we use */
	int nwc_hit;		/* this to disable mindelay complaint in dequeue */

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct timer_list timer;	/* send delay timer */
#ifdef HTB_RATECM
	struct timer_list rttim;	/* rate computer timer */
	int recmp_bucket;	/* which hash bucket to recompute next */
#endif

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;
};

/* compute hash of size HTB_HSIZE for given handle */
static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
#error "Declare new hash for your HTB_HSIZE"
#endif
	h ^= h >> 8;		/* stolen from cbq_hash */
	h ^= h >> 4;
	return h & 0xf;
}
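/*
 * Worked example of the fold above: class 1:10 has handle 0x00010010, so
 * h ^= h >> 8 gives 0x00010110, h ^= h >> 4 gives 0x00011101, and the low
 * nibble selects bucket 1.  (Example handle only; any 32-bit classid folds
 * the same way into one of the HTB_HSIZE buckets.)
 */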

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *p;
	struct htb_class *cl;

	if (TC_H_MAJ(handle) != sch->handle)
		return NULL;

	hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
		if (cl->classid == handle)
			return cl;
	}
	return NULL;
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
 * then finish and return direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1
static inline u32 htb_classid(struct htb_class *cl)
{
	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	   note that nfmark can be used too by attaching filter fw with no
	   rules in it */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#elif defined(CONFIG_NET_CLS_POLICE)
		if (result == TC_POLICE_SHOT)
			return HTB_DIRECT;
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->classid > c->classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
	if (cl->pq_key == q->jiffies)
		cl->pq_key++;

	/* update the nearest event cache */
	if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (time_after_eq(cl->pq_key, c->pq_key))
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}


/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use so that
				   reset bit in mask as parent is already ok */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing child which is pointed to from
				   parent feed - forget the pointer but remember
				   classid */
				p->un.inner.last_ptr_id[prio] = cl->classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

#if HTB_HYSTERESIS
static inline long htb_lowater(const struct htb_class *cl)
{
	return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
	return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
}
#else
#define htb_lowater(cl)	(0)
#define htb_hiwater(cl)	(0)
#endif
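/*
 * With HTB_HYSTERESIS the two thresholds above depend on the class's current
 * mode, so the mode does not flap around zero tokens: a class only drops to
 * HTB_CANT_SEND once its ctokens fall below -cbuffer, but must climb back to
 * zero or above before it leaves that mode again; likewise it stays
 * HTB_CAN_SEND down to -buffer tokens yet needs tokens >= 0 to (re)enter that
 * mode.  Without hysteresis both thresholds are simply 0.
 */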

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->un.leaf.aprio);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else {
		cl->bstats.packets++;
		cl->bstats.bytes += skb->len;
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	sch->bstats.packets++;
	sch->bstats.bytes += skb->len;
	return NET_XMIT_SUCCESS;
}

/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int ret = NET_XMIT_SUCCESS;
	struct htb_class *cl = htb_classify(skb, sch, &ret);
	struct sk_buff *tskb;

	if (cl == HTB_DIRECT || !cl) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen && cl) {
			__skb_queue_head(&q->direct_queue, skb);
		} else {
			__skb_queue_head(&q->direct_queue, skb);
			tskb = __skb_dequeue_tail(&q->direct_queue);
			kfree_skb(tskb);
			sch->qstats.drops++;
			return NET_XMIT_CN;
		}
	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else
		htb_activate(q, cl);

	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

static void htb_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	sch->flags &= ~TCQ_F_THROTTLED;
	wmb();
	netif_schedule(sch->dev);
}

#ifdef HTB_RATECM
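/*
 * RT_GEN below decays the running estimate R by 1/HTB_EWMAC and adds the
 * bytes/packets accumulated since the previous visit (D), then clears D.
 * One hash bucket is processed per one-second timer tick, so each class is
 * visited once every HTB_HSIZE seconds and R settles at roughly
 * HTB_EWMAC * HTB_HSIZE times the per-second rate, which is what
 * htb_dump_class_stats() divides by to report bps/pps.
 */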
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
static void htb_rate_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *p;
	struct htb_class *cl;


	/* lock queue so that we can muck with it */
	spin_lock_bh(&sch->dev->queue_lock);

	q->rttim.expires = jiffies + HZ;
	add_timer(&q->rttim);

	/* scan and recompute one bucket at a time */
	if (++q->recmp_bucket >= HTB_HSIZE)
		q->recmp_bucket = 0;

	hlist_for_each_entry(cl, p, q->hash + q->recmp_bucket, hlist) {
		RT_GEN(cl->sum_bytes, cl->rate_bytes);
		RT_GEN(cl->sum_packets, cl->rate_packets);
	}
	spin_unlock_bh(&sch->dev->queue_lock);
}
#endif

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use more precise clock than the event queue here.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, int bytes)
{
	long toks, diff;
	enum htb_cmode old_mode;

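/* HTB_ACCNT below is the per-class token bucket update used twice in the
 * loop that follows: refill bucket cl->T by the elapsed time "diff", cap it
 * at the configured burst cl->B, subtract the transmit time of "bytes" taken
 * from rate table cl->R, and never let it drop below 1 - cl->mbuffer.
 * T/B/R name the (c)tokens, (c)buffer and rate/ceil members respectively. */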
#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
			 if (toks > cl->B) toks = cl->B; \
			 toks -= L2T(cl, cl->R, bytes); \
			 if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
			 cl->T = toks

	while (cl) {
		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			HTB_ACCNT(tokens, buffer, rate);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		HTB_ACCNT(ctokens, cbuffer, ceil);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}
#ifdef HTB_RATECM
		/* update rate counters */
		cl->sum_bytes += bytes;
		cl->sum_packets++;
#endif

		/* update byte stats except for leaves which are already updated */
		if (cl->level) {
			cl->bstats.bytes += bytes;
			cl->bstats.packets++;
		}
		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns jiffies to
 * next pending event (0 for no event in pq).
 * Note: Applied are events whose cl->pq_key <= jiffies.
 */
static long htb_do_events(struct htb_sched *q, int level)
{
	int i;

	for (i = 0; i < 500; i++) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (time_after(cl->pq_key, q->jiffies)) {
			return cl->pq_key - q->jiffies;
		}
		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}
	if (net_ratelimit())
		printk(KERN_WARNING "htb: too many events !\n");
	return HZ / 10;
}

/* Returns class->node+prio from id-tree where the class's id is >= id. NULL
   is returned if no such one exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);
		if (id == cl->classid)
			return n;

		if (id > cl->classid) {
			n = n->rb_right;
		} else {
			r = n;
			n = n->rb_left;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 * pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_TRAP(tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			   the original or next ptr */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it
				   can become out of date quickly */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				BUG_TRAP(*sp->pptr);
				if (!*sp->pptr)
					return NULL;
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	BUG_TRAP(0);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		BUG_TRAP(cl);
		if (!cl)
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		   qdisc drops packets in enqueue routine or if someone used
		   graft operation on the leaf since last dequeue;
		   simply deactivate and skip such class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;
		if (!cl->warned) {
			printk(KERN_WARNING
			       "htb: class %X isn't work conserving ?!\n",
			       cl->classid);
			cl->warned = 1;
		}
		q->nwc_hit++;
		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
				  ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
					  ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb->len);
	}
	return skb;
}

static void htb_delay_by(struct Qdisc *sch, long delay)
{
	struct htb_sched *q = qdisc_priv(sch);
	if (delay <= 0)
		delay = 1;
	if (unlikely(delay > 5 * HZ)) {
		if (net_ratelimit())
			printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
		delay = 5 * HZ;
	}
	/* why don't we use jiffies here ? because expires can be in the past */
	mod_timer(&q->timer, q->jiffies + delay);
	sch->flags |= TCQ_F_THROTTLED;
	sch->qstats.overlimits++;
}

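/*
 * Dequeue order, as implemented below: packets from the direct queue are
 * served first, then the rows are scanned from level 0 upwards so that
 * leaves able to send at their own rate win over leaves that would have to
 * borrow from an ancestor, and within each level the lowest prio number
 * (highest priority) is tried first.  Pending mode-change events for a level
 * are processed before its rows are looked at.
 */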
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = NULL;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	long min_delay;

	q->jiffies = jiffies;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	PSCHED_GET_TIME(q->now);

	min_delay = LONG_MAX;
	q->nwc_hit = 0;
	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		long delay;
		if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
			delay = htb_do_events(q, level);
			q->near_ev_cache[level] =
			    q->jiffies + (delay ? delay : HZ);
		} else
			delay = q->near_ev_cache[level] - q->jiffies;

		if (delay && min_delay > delay)
			min_delay = delay;
		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL)) {
				sch->q.qlen--;
				sch->flags &= ~TCQ_F_THROTTLED;
				goto fin;
			}
		}
	}
	htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
fin:
	return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;

		}
	}
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer(&q->timer);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}

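/*
 * Qdisc-level setup.  The options blob comes from the tc tool; a typical
 * invocation would look something like
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 * (example device and handle only), where "default 20" ends up in
 * gopt->defcls and rate2quantum ("r2q" in tc) controls how per-class quanta
 * are derived from the configured rates.
 */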
1090static int htb_init(struct Qdisc *sch, struct rtattr *opt)
1091{
1092 struct htb_sched *q = qdisc_priv(sch);
1093 struct rtattr *tb[TCA_HTB_INIT];
1094 struct tc_htb_glob *gopt;
1095 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001096 if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
Stephen Hemminger87990462006-08-10 23:35:16 -07001097 tb[TCA_HTB_INIT - 1] == NULL ||
1098 RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
1100 return -EINVAL;
1101 }
Stephen Hemminger87990462006-08-10 23:35:16 -07001102 gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103 if (gopt->version != HTB_VER >> 16) {
Stephen Hemminger87990462006-08-10 23:35:16 -07001104 printk(KERN_ERR
1105 "HTB: need tc/htb version %d (minor is %d), you have %d\n",
1106 HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001107 return -EINVAL;
1108 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109
1110 INIT_LIST_HEAD(&q->root);
1111 for (i = 0; i < HTB_HSIZE; i++)
Stephen Hemminger0cef2962006-08-10 23:35:38 -07001112 INIT_HLIST_HEAD(q->hash + i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001113 for (i = 0; i < TC_HTB_NUMPRIO; i++)
Stephen Hemminger87990462006-08-10 23:35:16 -07001114 INIT_LIST_HEAD(q->drops + i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115
1116 init_timer(&q->timer);
1117 skb_queue_head_init(&q->direct_queue);
1118
1119 q->direct_qlen = sch->dev->tx_queue_len;
Stephen Hemminger87990462006-08-10 23:35:16 -07001120 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121 q->direct_qlen = 2;
1122 q->timer.function = htb_timer;
1123 q->timer.data = (unsigned long)sch;
1124
1125#ifdef HTB_RATECM
1126 init_timer(&q->rttim);
1127 q->rttim.function = htb_rate_timer;
1128 q->rttim.data = (unsigned long)sch;
1129 q->rttim.expires = jiffies + HZ;
1130 add_timer(&q->rttim);
1131#endif
1132 if ((q->rate2quantum = gopt->rate2quantum) < 1)
1133 q->rate2quantum = 1;
1134 q->defcls = gopt->defcls;
1135
1136 return 0;
1137}
1138
1139static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1140{
1141 struct htb_sched *q = qdisc_priv(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001142 unsigned char *b = skb->tail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 struct rtattr *rta;
1144 struct tc_htb_glob gopt;
Stephen Hemminger9ac961e2006-08-10 23:33:16 -07001145 spin_lock_bh(&sch->dev->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146 gopt.direct_pkts = q->direct_pkts;
1147
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 gopt.version = HTB_VER;
1149 gopt.rate2quantum = q->rate2quantum;
1150 gopt.defcls = q->defcls;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07001151 gopt.debug = 0;
Stephen Hemminger87990462006-08-10 23:35:16 -07001152 rta = (struct rtattr *)b;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1154 RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1155 rta->rta_len = skb->tail - b;
Stephen Hemminger9ac961e2006-08-10 23:33:16 -07001156 spin_unlock_bh(&sch->dev->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001157 return skb->len;
1158rtattr_failure:
Stephen Hemminger9ac961e2006-08-10 23:33:16 -07001159 spin_unlock_bh(&sch->dev->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 skb_trim(skb, skb->tail - skb->data);
1161 return -1;
1162}
1163
1164static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
Stephen Hemminger87990462006-08-10 23:35:16 -07001165 struct sk_buff *skb, struct tcmsg *tcm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166{
Stephen Hemminger87990462006-08-10 23:35:16 -07001167 struct htb_class *cl = (struct htb_class *)arg;
1168 unsigned char *b = skb->tail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 struct rtattr *rta;
1170 struct tc_htb_opt opt;
1171
Stephen Hemminger9ac961e2006-08-10 23:33:16 -07001172 spin_lock_bh(&sch->dev->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
1174 tcm->tcm_handle = cl->classid;
1175 if (!cl->level && cl->un.leaf.q)
1176 tcm->tcm_info = cl->un.leaf.q->handle;
1177
Stephen Hemminger87990462006-08-10 23:35:16 -07001178 rta = (struct rtattr *)b;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1180
Stephen Hemminger87990462006-08-10 23:35:16 -07001181 memset(&opt, 0, sizeof(opt));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182
Stephen Hemminger87990462006-08-10 23:35:16 -07001183 opt.rate = cl->rate->rate;
1184 opt.buffer = cl->buffer;
1185 opt.ceil = cl->ceil->rate;
1186 opt.cbuffer = cl->cbuffer;
1187 opt.quantum = cl->un.leaf.quantum;
1188 opt.prio = cl->un.leaf.prio;
1189 opt.level = cl->level;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
1191 rta->rta_len = skb->tail - b;
Stephen Hemminger9ac961e2006-08-10 23:33:16 -07001192 spin_unlock_bh(&sch->dev->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193 return skb->len;
1194rtattr_failure:
Stephen Hemminger9ac961e2006-08-10 23:33:16 -07001195 spin_unlock_bh(&sch->dev->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196 skb_trim(skb, b - skb->data);
1197 return -1;
1198}
static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;

#ifdef HTB_RATECM
	cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
	cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
#endif

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
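
/* Replace the child qdisc of a leaf class.  When no replacement is
 * supplied, a default pfifo is created so the leaf never loses its
 * queue; the old qdisc is reset and returned through *old. */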
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl && !cl->level) {
		if (new == NULL &&
		    (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					     cl->classid)) == NULL)
			return -ENOBUFS;
		sch_tree_lock(sch);
		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
			qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
			qdisc_reset(*old);
		}
		sch_tree_unlock(sch);
		return 0;
	}
	return -ENOENT;
}
1244
Stephen Hemminger87990462006-08-10 23:35:16 -07001245static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246{
Stephen Hemminger87990462006-08-10 23:35:16 -07001247 struct htb_class *cl = (struct htb_class *)arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248 return (cl && !cl->level) ? cl->un.leaf.q : NULL;
1249}
1250
Patrick McHardy256d61b2006-11-29 17:37:05 -08001251static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1252{
1253 struct htb_class *cl = (struct htb_class *)arg;
1254
1255 if (cl->un.leaf.q->q.qlen == 0)
1256 htb_deactivate(qdisc_priv(sch), cl);
1257}
1258
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1260{
Stephen Hemminger87990462006-08-10 23:35:16 -07001261 struct htb_class *cl = htb_find(classid, sch);
1262 if (cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 cl->refcnt++;
1264 return (unsigned long)cl;
1265}
1266
1267static void htb_destroy_filters(struct tcf_proto **fl)
1268{
1269 struct tcf_proto *tp;
1270
1271 while ((tp = *fl) != NULL) {
1272 *fl = tp->next;
1273 tcf_destroy(tp);
1274 }
1275}
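
/* Helpers for collapsing an inner class back into a leaf once its last
 * child has been deleted: htb_parent_last_child() detects the situation,
 * htb_parent_to_leaf() converts the parent and attaches a new leaf qdisc
 * (see htb_delete below). */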
static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;

	if (!(cl->parent->children.next == &cl->sibling &&
	      cl->parent->children.prev == &cl->sibling))
		/* not the last child */
		return 0;

	return 1;
}

static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->un.leaf.quantum = parent->quantum;
	parent->un.leaf.prio = parent->prio;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	PSCHED_GET_TIME(parent->t_c);
	parent->cmode = HTB_CAN_SEND;
}
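
/* Free one class: destroy its leaf qdisc, drop its rate tables and
 * filters, recursively destroy any remaining children, and unlink it
 * from the hash table and its parent's sibling list. */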
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	struct htb_sched *q = qdisc_priv(sch);

	if (!cl->level) {
		BUG_TRAP(cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	htb_destroy_filters(&cl->filter_list);

	while (!list_empty(&cl->children))
		htb_destroy_class(sch, list_entry(cl->children.next,
						  struct htb_class, sibling));

	/* note: this delete may happen twice (see htb_delete) */
	hlist_del_init(&cl->hlist);
	list_del(&cl->sibling);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	kfree(cl);
}

/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);

	del_timer_sync(&q->timer);
#ifdef HTB_RATECM
	del_timer_sync(&q->rttim);
#endif
	/* This line used to be after the htb_destroy_class call below,
	   and surprisingly it worked in 2.4.  But it must precede it,
	   because the filters need their target classes alive in order
	   to call unbind_filter on them (without an Oops). */
	htb_destroy_filters(&q->filter_list);

	while (!list_empty(&q->root))
		htb_destroy_class(sch, list_entry(q->root.next,
						  struct htb_class, sibling));

	__skb_queue_purge(&q->direct_queue);
}
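
/* Delete a class.  Deletion is refused while children or bound filters
 * remain.  If the class was the last child of its parent, the parent is
 * converted back into a leaf with a fresh default pfifo. */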
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned int qlen;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	/* TODO: why don't we allow deleting a whole subtree?  References?
	   Does the tc subsystem guarantee that htb_destroy runs with no
	   class references held, so that children can be removed safely
	   there? */
	if (!list_empty(&cl->children) || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					  cl->parent->classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		qlen = cl->un.leaf.q->q.qlen;
		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
	}

	/* delete from hash and active; the remainder is done in
	   htb_destroy_class */
	hlist_del_init(&cl->hlist);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (last_child)
		htb_parent_to_leaf(cl, new_q);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}
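
/* Create a new class or change the parameters of an existing one.  From
 * user space this is typically driven by the tc utility; as a rough
 * illustration (iproute2 syntax, not taken from this file):
 *
 *   tc class add dev eth0 parent 1: classid 1:10 htb rate 1mbit ceil 2mbit
 *
 * The request arrives here as a TCA_OPTIONS attribute carrying
 * TCA_HTB_PARMS plus the TCA_HTB_RTAB/TCA_HTB_CTAB rate tables. */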
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct rtattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct rtattr *opt = tca[TCA_OPTIONS - 1];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct rtattr *tb[TCA_HTB_RTAB];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
	    tb[TCA_HTB_PARMS - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
	if (!rtab || !ctab)
		goto failure;

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle)
		    || htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		cl->refcnt = 1;
		INIT_LIST_HEAD(&cl->sibling);
		INIT_HLIST_NODE(&cl->hlist);
		INIT_LIST_HEAD(&cl->children);
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create the leaf qdisc early, because it uses
		   kmalloc(GFP_KERNEL), which must not be called under
		   sch_tree_lock -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* the leaf (us) needs an elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60);	/* 1min */
		PSCHED_GET_TIME(cl->t_c);
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
		list_add_tail(&cl->sibling,
			      parent ? &parent->children : &q->root);
	} else
		sch_tree_lock(sch);

	/* there used to be a nasty bug here: we must check that the node
	   is really a leaf before changing cl->un.leaf! */
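
	/* quantum is the per-round byte budget of a leaf.  It defaults to
	 * rate / r2q; for illustration, assuming the common r2q value of 10
	 * (tc supplies the actual rate2quantum), a 64kbit class gives
	 * 8000 / 10 = 800 bytes (clamped up to 1000) and a 100mbit class
	 * gives 12500000 / 10 = 1250000 bytes (clamped down to 200000),
	 * both printing the "Consider r2q change" warnings below when no
	 * explicit quantum was given. */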
	if (!cl->level) {
		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 1000;
		}
		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 200000;
		}
		if (hopt->quantum)
			cl->un.leaf.quantum = hopt->quantum;
		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;

		/* backup for htb_parent_to_leaf */
		cl->quantum = cl->un.leaf.quantum;
		cl->prio = cl->un.leaf.prio;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}

static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}
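
/* Filters are bound to classes by classid.  Binding only bumps a
 * counter (filter_cnt) rather than taking a real reference; htb_delete
 * refuses to remove a class while this counter is non-zero. */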
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_find(classid, sch);

	/* if (cl && !cl->level) return 0;
	   The line above used to be here to prevent attaching filters to
	   leaves.  But at least the tc_index filter uses this call just to
	   look up the class for other reasons, so we have to allow it.
	   ----
	   19.6.2002 As Werner explained, it is OK - binding a filter is
	   just another way to "lock" the class - unlike "get", this lock
	   can be broken by the class during destroy, IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	else
		q->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
	else
		q->filter_cnt--;
}
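
/* Walk every class in the hash table, honouring the walker's skip/count
 * bookkeeping so that interrupted dumps can be resumed. */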
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static struct Qdisc_class_ops htb_class_ops = {
	.graft = htb_graft,
	.leaf = htb_leaf,
	.qlen_notify = htb_qlen_notify,
	.get = htb_get,
	.put = htb_put,
	.change = htb_change_class,
	.delete = htb_delete,
	.walk = htb_walk,
	.tcf_chain = htb_find_tcf,
	.bind_tcf = htb_bind_filter,
	.unbind_tcf = htb_unbind_filter,
	.dump = htb_dump_class,
	.dump_stats = htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops = {
	.next = NULL,
	.cl_ops = &htb_class_ops,
	.id = "htb",
	.priv_size = sizeof(struct htb_sched),
	.enqueue = htb_enqueue,
	.dequeue = htb_dequeue,
	.requeue = htb_requeue,
	.drop = htb_drop,
	.init = htb_init,
	.reset = htb_reset,
	.destroy = htb_destroy,
	.change = NULL /* htb_change */,
	.dump = htb_dump,
	.owner = THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}

static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");