/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. Leaves ALWAYS have level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
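
/*
 * Illustrative configuration (not from this file; assumes the standard
 * tc(8) HTB syntax): two leaves borrowing from a common parent up to
 * its ceiling.
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1:  classid 1:1  htb rate 100kbps ceil 100kbps
 *	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 60kbps ceil 100kbps
 *	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 40kbps ceil 100kbps
 *
 * Here 1:10 and 1:20 are leaves at level 0, 1:1 is an interior class at
 * level 1, and unclassified traffic defaults to leaf 1:20.
 */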

#define HTB_HSIZE 16		/* classid hash size */
#define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	/* general class parameters */
	u32 classid;
	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

	/* topology */
	int level;		/* our level (see above) */
	struct htb_class *parent;	/* parent class */
	struct hlist_node hlist;	/* classid hash list item */
	struct list_head sibling;	/* sibling list item */
	struct list_head children;	/* children list */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int prio;
			int aprio;
			int quantum;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When class changes from state 1->2 and disconnects from
			   parent's feed then we lose the ptr value and start from
			   the first child again. Here we store classid of the
			   last valid ptr (used when ptr is NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	psched_time_t pq_key;

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int warned;		/* only one warning about non work conserving .. */

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */

	int prio;		/* For parent to leaf return possible here */
	int quantum;		/* we do backup. Finally full replacement */
				/* of un.leaf originals should be done. */
};

/* TODO: maybe compute rate when size is too large .. or drop ? */
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
		       int size)
{
	int slot = size >> rate->rate.cell_log;
	if (slot > 255) {
		cl->xstats.giants++;
		slot = 255;
	}
	return rate->data[slot];
}
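
/*
 * Worked example for L2T() (illustrative numbers, not taken from the
 * source): with cell_log = 3, a 1000 byte packet selects slot
 * 1000 >> 3 = 125 and rate->data[125] yields its precomputed
 * transmission time in PSCHED ticks. Sizes of 2048 bytes and up would
 * select slots above 255, so they are clamped to slot 255 and counted
 * as giants in xstats.
 */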

struct htb_sched {
	struct list_head root;	/* root classes list */
	struct hlist_head hash[HTB_HSIZE];	/* hashed by classid */
	struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

	/* whether we hit non-work conserving class during this dequeue; we use */
	int nwc_hit;		/* this to disable mindelay complaint in dequeue */

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct qdisc_watchdog watchdog;

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;
};

/* compute hash of size HTB_HSIZE for given handle */
static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
#error "Declare new hash for your HTB_HSIZE"
#endif
	h ^= h >> 8;		/* stolen from cbq_hash */
	h ^= h >> 4;
	return h & 0xf;
}
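
/*
 * Example (plain arithmetic): for handle 0x10001 (classid 1:1),
 * h ^= h >> 8 gives 0x10101, h ^= h >> 4 gives 0x11111, and
 * h & 0xf selects bucket 1 of the 16 buckets.
 */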

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *p;
	struct htb_class *cl;

	if (TC_H_MAJ(handle) != sch->handle)
		return NULL;

	hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
		if (cl->classid == handle)
			return cl;
	}
	return NULL;
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If that is still
 * unsuccessful, finish and return the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1
static inline u32 htb_classid(struct htb_class *cl)
{
	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	   note that nfmark can be used too by attaching filter fw with no
	   rules in it */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#elif defined(CONFIG_NET_CLS_POLICE)
		if (result == TC_POLICE_SHOT)
			return HTB_DIRECT;
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}
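
/*
 * Summary of the classification outcomes above, for a qdisc with major
 * handle 1: (descriptive only, mirrors the code):
 *   skb->priority == 1:0            -> HTB_DIRECT (bypasses shaping)
 *   skb->priority == a leaf classid -> that leaf
 *   filter verdict 1:0              -> HTB_DIRECT
 *   filter verdict an inner class   -> continue with its filter chain
 *   no match at all                 -> 1:<defcls> if it is a leaf,
 *                                      otherwise HTB_DIRECT
 */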

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->classid > c->classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode at time cl->pq_key. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}
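
/*
 * Example (illustrative numbers): if htb_class_mode() left a diff of
 * 5000 PSCHED ticks until the class regains tokens, the class is keyed
 * at q->now + 5000 in wait_pq, and htb_do_events() re-evaluates its
 * mode once q->now reaches that key.
 */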

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}


/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use, so
				   reset the bit in mask as parent is
				   already ok */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing child which is pointed to
				   from parent feed - forget the pointer but
				   remember classid */
				p->un.inner.last_ptr_id[prio] = cl->classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

#if HTB_HYSTERESIS
static inline long htb_lowater(const struct htb_class *cl)
{
	return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
	return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
}
#else
#define htb_lowater(cl) (0)
#define htb_hiwater(cl) (0)
#endif
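
/*
 * With HTB_HYSTERESIS enabled the thresholds above are asymmetric: a
 * class already allowed to send keeps its mode until its (c)tokens fall
 * below -(c)buffer rather than below zero, and a blocked class must
 * climb back to zero before it may send again. The asymmetry trades a
 * little shaping accuracy for fewer mode transitions (the comment at
 * htb_class_mode() below quotes a speed gain of about 1/6).
 */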

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then *diff is set to the time difference from
 * now to the time when cl will change its state (used later as the
 * delay for htb_add_to_wait_tree).
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->un.leaf.aprio);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else {
		cl->bstats.packets++;
		cl->bstats.bytes += skb->len;
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	sch->bstats.packets++;
	sch->bstats.bytes += skb->len;
	return NET_XMIT_SUCCESS;
}

/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int ret = NET_XMIT_SUCCESS;
	struct htb_class *cl = htb_classify(skb, sch, &ret);
	struct sk_buff *tskb;

	if (cl == HTB_DIRECT || !cl) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen && cl) {
			__skb_queue_head(&q->direct_queue, skb);
		} else {
			__skb_queue_head(&q->direct_queue, skb);
			tskb = __skb_dequeue_tail(&q->direct_queue);
			kfree_skb(tskb);
			sch->qstats.drops++;
			return NET_XMIT_CN;
		}
	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else
		htb_activate(q, cl);

	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock here than the event
 * queue. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, int bytes)
{
	long toks, diff;
	enum htb_cmode old_mode;

#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
	if (toks > cl->B) toks = cl->B; \
	toks -= L2T(cl, cl->R, bytes); \
	if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
	cl->T = toks

	while (cl) {
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			HTB_ACCNT(tokens, buffer, rate);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		HTB_ACCNT(ctokens, cbuffer, ceil);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update byte stats except for leaves which are already updated */
		if (cl->level) {
			cl->bstats.bytes += bytes;
			cl->bstats.packets++;
		}
		cl = cl->parent;
	}
}
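
/*
 * HTB_ACCNT example (illustrative numbers): with tokens = 2000,
 * buffer = 10000, diff = 1000 ticks since the last checkpoint and a
 * packet costing L2T() = 4000 ticks, the update computes
 * min(2000 + 1000, 10000) - 4000 = -1000, so the rate bucket goes
 * negative. Ancestors below the borrowing level are charged only on
 * their ceil buckets, while those at or above it also pay from their
 * rate buckets.
 */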

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq).
 * Note: Applied are events whose cl->pq_key <= q->now.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level)
{
	int i;

	for (i = 0; i < 500; i++) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}
	if (net_ratelimit())
		printk(KERN_WARNING "htb: too many events !\n");
	return q->now + PSCHED_TICKS_PER_SEC / 10;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
   if no such one exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);
		if (id == cl->classid)
			return n;

		if (id > cl->classid) {
			n = n->rb_right;
		} else {
			r = n;
			n = n->rb_left;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf the current feed pointers point to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 * pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_TRAP(tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			   the original or next ptr */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
				   can become out of date quickly */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				BUG_TRAP(*sp->pptr);
				if (!*sp->pptr)
					return NULL;
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	BUG_TRAP(0);
	return NULL;
}
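
/*
 * The walk above is a depth-first descent with an explicit stack: each
 * inner level resumes from its remembered ptr (recovered via
 * last_ptr_id after an invalidation), rewinds to the leftmost entry
 * when a feed tree is exhausted and advances the pointer one level up,
 * so successive calls visit the active leaves of one prio in round
 * robin order at roughly O(tree depth) per call.
 */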

/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		BUG_TRAP(cl);
		if (!cl)
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		   qdisc drops packets in enqueue routine or if someone used
		   graft operation on the leaf since last dequeue;
		   simply deactivate and skip such class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;
		if (!cl->warned) {
			printk(KERN_WARNING
			       "htb: class %X isn't work conserving ?!\n",
			       cl->classid);
			cl->warned = 1;
		}
		q->nwc_hit++;
		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
				  ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
					  ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb->len);
	}
	return skb;
}
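
/*
 * Deficit round robin example (illustrative numbers): a leaf with
 * quantum 1500 and deficit 0 that yields a 1000 byte skb ends at
 * deficit -1000, so it is topped up to 500 and the row pointer moves
 * on; on the next visit the deficit goes 500 - 1000, is topped up to
 * 1000, and the pointer moves on again, giving each active leaf about
 * one quantum of bytes per round.
 */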
889
Linus Torvalds1da177e2005-04-16 15:20:36 -0700890static struct sk_buff *htb_dequeue(struct Qdisc *sch)
891{
892 struct sk_buff *skb = NULL;
893 struct htb_sched *q = qdisc_priv(sch);
894 int level;
Patrick McHardyfb983d42007-03-16 01:22:39 -0700895 psched_time_t next_event;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896
897 /* try to dequeue direct packets as high prio (!) to minimize cpu work */
Stephen Hemminger87990462006-08-10 23:35:16 -0700898 skb = __skb_dequeue(&q->direct_queue);
899 if (skb != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 sch->flags &= ~TCQ_F_THROTTLED;
901 sch->q.qlen--;
902 return skb;
903 }
904
Stephen Hemminger87990462006-08-10 23:35:16 -0700905 if (!sch->q.qlen)
906 goto fin;
Patrick McHardy3bebcda2007-03-23 11:29:25 -0700907 q->now = psched_get_time();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908
Patrick McHardyfb983d42007-03-16 01:22:39 -0700909 next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700910 q->nwc_hit = 0;
911 for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
912 /* common case optimization - skip event handler quickly */
913 int m;
Patrick McHardyfb983d42007-03-16 01:22:39 -0700914 psched_time_t event;
Stephen Hemminger87990462006-08-10 23:35:16 -0700915
Patrick McHardyfb983d42007-03-16 01:22:39 -0700916 if (q->now >= q->near_ev_cache[level]) {
917 event = htb_do_events(q, level);
Patrick McHardy2e4b3b02007-05-23 23:39:54 -0700918 if (!event)
919 event = q->now + PSCHED_TICKS_PER_SEC;
920 q->near_ev_cache[level] = event;
Patrick McHardyfb983d42007-03-16 01:22:39 -0700921 } else
922 event = q->near_ev_cache[level];
923
924 if (event && next_event > event)
925 next_event = event;
926
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927 m = ~q->row_mask[level];
928 while (m != (int)(-1)) {
Stephen Hemminger87990462006-08-10 23:35:16 -0700929 int prio = ffz(m);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930 m |= 1 << prio;
Stephen Hemminger87990462006-08-10 23:35:16 -0700931 skb = htb_dequeue_tree(q, prio, level);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700932 if (likely(skb != NULL)) {
933 sch->q.qlen--;
934 sch->flags &= ~TCQ_F_THROTTLED;
935 goto fin;
936 }
937 }
938 }
Patrick McHardyfb983d42007-03-16 01:22:39 -0700939 sch->qstats.overlimits++;
940 qdisc_watchdog_schedule(&q->watchdog, next_event);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700941fin:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700942 return skb;
943}
944
945/* try to drop from each class (by prio) until one succeed */
Stephen Hemminger87990462006-08-10 23:35:16 -0700946static unsigned int htb_drop(struct Qdisc *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947{
948 struct htb_sched *q = qdisc_priv(sch);
949 int prio;
950
951 for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
952 struct list_head *p;
Stephen Hemminger87990462006-08-10 23:35:16 -0700953 list_for_each(p, q->drops + prio) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954 struct htb_class *cl = list_entry(p, struct htb_class,
955 un.leaf.drop_list);
956 unsigned int len;
Stephen Hemminger87990462006-08-10 23:35:16 -0700957 if (cl->un.leaf.q->ops->drop &&
958 (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959 sch->q.qlen--;
960 if (!cl->un.leaf.q->q.qlen)
Stephen Hemminger87990462006-08-10 23:35:16 -0700961 htb_deactivate(q, cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700962 return len;
963 }
964 }
965 }
966 return 0;
967}
968
969/* reset all classes */
970/* always caled under BH & queue lock */
Stephen Hemminger87990462006-08-10 23:35:16 -0700971static void htb_reset(struct Qdisc *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972{
973 struct htb_sched *q = qdisc_priv(sch);
974 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975
976 for (i = 0; i < HTB_HSIZE; i++) {
Stephen Hemminger0cef2962006-08-10 23:35:38 -0700977 struct hlist_node *p;
978 struct htb_class *cl;
979
980 hlist_for_each_entry(cl, p, q->hash + i, hlist) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981 if (cl->level)
Stephen Hemminger87990462006-08-10 23:35:16 -0700982 memset(&cl->un.inner, 0, sizeof(cl->un.inner));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983 else {
Stephen Hemminger87990462006-08-10 23:35:16 -0700984 if (cl->un.leaf.q)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985 qdisc_reset(cl->un.leaf.q);
986 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
987 }
988 cl->prio_activity = 0;
989 cl->cmode = HTB_CAN_SEND;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990
991 }
992 }
Patrick McHardyfb983d42007-03-16 01:22:39 -0700993 qdisc_watchdog_cancel(&q->watchdog);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994 __skb_queue_purge(&q->direct_queue);
995 sch->q.qlen = 0;
Stephen Hemminger87990462006-08-10 23:35:16 -0700996 memset(q->row, 0, sizeof(q->row));
997 memset(q->row_mask, 0, sizeof(q->row_mask));
998 memset(q->wait_pq, 0, sizeof(q->wait_pq));
999 memset(q->ptr, 0, sizeof(q->ptr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000 for (i = 0; i < TC_HTB_NUMPRIO; i++)
Stephen Hemminger87990462006-08-10 23:35:16 -07001001 INIT_LIST_HEAD(q->drops + i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001002}
1003
1004static int htb_init(struct Qdisc *sch, struct rtattr *opt)
1005{
1006 struct htb_sched *q = qdisc_priv(sch);
1007 struct rtattr *tb[TCA_HTB_INIT];
1008 struct tc_htb_glob *gopt;
1009 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001010 if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
Stephen Hemminger87990462006-08-10 23:35:16 -07001011 tb[TCA_HTB_INIT - 1] == NULL ||
1012 RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001013 printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
1014 return -EINVAL;
1015 }
Stephen Hemminger87990462006-08-10 23:35:16 -07001016 gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017 if (gopt->version != HTB_VER >> 16) {
Stephen Hemminger87990462006-08-10 23:35:16 -07001018 printk(KERN_ERR
1019 "HTB: need tc/htb version %d (minor is %d), you have %d\n",
1020 HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001021 return -EINVAL;
1022 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023
1024 INIT_LIST_HEAD(&q->root);
1025 for (i = 0; i < HTB_HSIZE; i++)
Stephen Hemminger0cef2962006-08-10 23:35:38 -07001026 INIT_HLIST_HEAD(q->hash + i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027 for (i = 0; i < TC_HTB_NUMPRIO; i++)
Stephen Hemminger87990462006-08-10 23:35:16 -07001028 INIT_LIST_HEAD(q->drops + i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029
Patrick McHardyfb983d42007-03-16 01:22:39 -07001030 qdisc_watchdog_init(&q->watchdog, sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001031 skb_queue_head_init(&q->direct_queue);
1032
1033 q->direct_qlen = sch->dev->tx_queue_len;
Stephen Hemminger87990462006-08-10 23:35:16 -07001034 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035 q->direct_qlen = 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037 if ((q->rate2quantum = gopt->rate2quantum) < 1)
1038 q->rate2quantum = 1;
1039 q->defcls = gopt->defcls;
1040
1041 return 0;
1042}
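
/*
 * rate2quantum example (assumes tc's usual default of r2q = 10, which
 * is chosen by userspace, not by this file): a class created without an
 * explicit quantum gets quantum = rate / r2q, so a class rated at
 * 125000 bytes/s is served in 12500 byte DRR chunks.
 */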
1043
1044static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1045{
1046 struct htb_sched *q = qdisc_priv(sch);
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001047 unsigned char *b = skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001048 struct rtattr *rta;
1049 struct tc_htb_glob gopt;
Stephen Hemminger9ac961e2006-08-10 23:33:16 -07001050 spin_lock_bh(&sch->dev->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051 gopt.direct_pkts = q->direct_pkts;
1052
Linus Torvalds1da177e2005-04-16 15:20:36 -07001053 gopt.version = HTB_VER;
1054 gopt.rate2quantum = q->rate2quantum;
1055 gopt.defcls = q->defcls;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07001056 gopt.debug = 0;
Stephen Hemminger87990462006-08-10 23:35:16 -07001057 rta = (struct rtattr *)b;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001058 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1059 RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001060 rta->rta_len = skb_tail_pointer(skb) - b;
Stephen Hemminger9ac961e2006-08-10 23:33:16 -07001061 spin_unlock_bh(&sch->dev->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001062 return skb->len;
1063rtattr_failure:
Stephen Hemminger9ac961e2006-08-10 23:33:16 -07001064 spin_unlock_bh(&sch->dev->queue_lock);
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -07001065 nlmsg_trim(skb, skb_tail_pointer(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001066 return -1;
1067}
1068
1069static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
Stephen Hemminger87990462006-08-10 23:35:16 -07001070 struct sk_buff *skb, struct tcmsg *tcm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071{
Stephen Hemminger87990462006-08-10 23:35:16 -07001072 struct htb_class *cl = (struct htb_class *)arg;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001073 unsigned char *b = skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 struct rtattr *rta;
1075 struct tc_htb_opt opt;
1076
Stephen Hemminger9ac961e2006-08-10 23:33:16 -07001077 spin_lock_bh(&sch->dev->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078 tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
1079 tcm->tcm_handle = cl->classid;
1080 if (!cl->level && cl->un.leaf.q)
1081 tcm->tcm_info = cl->un.leaf.q->handle;
1082
Stephen Hemminger87990462006-08-10 23:35:16 -07001083 rta = (struct rtattr *)b;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1085
Stephen Hemminger87990462006-08-10 23:35:16 -07001086 memset(&opt, 0, sizeof(opt));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087
Stephen Hemminger87990462006-08-10 23:35:16 -07001088 opt.rate = cl->rate->rate;
1089 opt.buffer = cl->buffer;
1090 opt.ceil = cl->ceil->rate;
1091 opt.cbuffer = cl->cbuffer;
1092 opt.quantum = cl->un.leaf.quantum;
1093 opt.prio = cl->un.leaf.prio;
1094 opt.level = cl->level;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095 RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001096 rta->rta_len = skb_tail_pointer(skb) - b;
Stephen Hemminger9ac961e2006-08-10 23:33:16 -07001097 spin_unlock_bh(&sch->dev->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098 return skb->len;
1099rtattr_failure:
Stephen Hemminger9ac961e2006-08-10 23:33:16 -07001100 spin_unlock_bh(&sch->dev->queue_lock);
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -07001101 nlmsg_trim(skb, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001102 return -1;
1103}
1104
1105static int
Stephen Hemminger87990462006-08-10 23:35:16 -07001106htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001107{
Stephen Hemminger87990462006-08-10 23:35:16 -07001108 struct htb_class *cl = (struct htb_class *)arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110 if (!cl->level && cl->un.leaf.q)
1111 cl->qstats.qlen = cl->un.leaf.q->q.qlen;
1112 cl->xstats.tokens = cl->tokens;
1113 cl->xstats.ctokens = cl->ctokens;
1114
1115 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1116 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1117 gnet_stats_copy_queue(d, &cl->qstats) < 0)
1118 return -1;
1119
1120 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1121}
1122
1123static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
Stephen Hemminger87990462006-08-10 23:35:16 -07001124 struct Qdisc **old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125{
Stephen Hemminger87990462006-08-10 23:35:16 -07001126 struct htb_class *cl = (struct htb_class *)arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127
1128 if (cl && !cl->level) {
Patrick McHardy9f9afec2006-11-29 17:35:18 -08001129 if (new == NULL &&
1130 (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +09001131 cl->classid))
Stephen Hemminger87990462006-08-10 23:35:16 -07001132 == NULL)
1133 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134 sch_tree_lock(sch);
1135 if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
Patrick McHardy256d61b2006-11-29 17:37:05 -08001136 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137 qdisc_reset(*old);
1138 }
1139 sch_tree_unlock(sch);
1140 return 0;
1141 }
1142 return -ENOENT;
1143}
1144
Stephen Hemminger87990462006-08-10 23:35:16 -07001145static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146{
Stephen Hemminger87990462006-08-10 23:35:16 -07001147 struct htb_class *cl = (struct htb_class *)arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 return (cl && !cl->level) ? cl->un.leaf.q : NULL;
1149}
1150
Patrick McHardy256d61b2006-11-29 17:37:05 -08001151static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1152{
1153 struct htb_class *cl = (struct htb_class *)arg;
1154
1155 if (cl->un.leaf.q->q.qlen == 0)
1156 htb_deactivate(qdisc_priv(sch), cl);
1157}
1158
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1160{
Stephen Hemminger87990462006-08-10 23:35:16 -07001161 struct htb_class *cl = htb_find(classid, sch);
1162 if (cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163 cl->refcnt++;
1164 return (unsigned long)cl;
1165}
1166
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001167static inline int htb_parent_last_child(struct htb_class *cl)
1168{
1169 if (!cl->parent)
1170 /* the root class */
1171 return 0;
1172
1173 if (!(cl->parent->children.next == &cl->sibling &&
1174 cl->parent->children.prev == &cl->sibling))
1175 /* not the last child */
1176 return 0;
1177
1178 return 1;
1179}
1180
static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->un.leaf.quantum = parent->quantum;
	parent->un.leaf.prio = parent->prio;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = psched_get_time();
	parent->cmode = HTB_CAN_SEND;
}

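/* Release everything a class holds: the leaf qdisc, rate estimator,
   rate tables, filter chain and (recursively) all children, then unlink
   the class from the hash and sibling lists before freeing it. */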
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	struct htb_sched *q = qdisc_priv(sch);

	if (!cl->level) {
		BUG_TRAP(cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	tcf_destroy_chain(cl->filter_list);

	while (!list_empty(&cl->children))
		htb_destroy_class(sch, list_entry(cl->children.next,
						  struct htb_class, sibling));

	/* note: this delete may happen twice (see htb_delete) */
	hlist_del_init(&cl->hlist);
	list_del(&cl->sibling);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	kfree(cl);
}

/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after the htb_destroy_class call below,
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to call
	   unbind_filter on it (without an Oops). */
	tcf_destroy_chain(q->filter_list);

	while (!list_empty(&q->root))
		htb_destroy_class(sch, list_entry(q->root.next,
						  struct htb_class, sibling));

	__skb_queue_purge(&q->direct_queue);
}

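/* Delete a class on user request. If the class is its parent's last
   child, the parent is converted back into a leaf; the replacement
   pfifo is allocated before sch_tree_lock because the allocation may
   sleep. */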
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned int qlen;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	// TODO: why don't we allow deleting a subtree? references? does
	// the tc subsystem guarantee that htb_destroy holds no class
	// refs so that we can remove children safely there?
	if (!list_empty(&cl->children) || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					  cl->parent->classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		qlen = cl->un.leaf.q->q.qlen;
		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
	}

	/* delete from hash and active; remainder in destroy_class */
	hlist_del_init(&cl->hlist);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (last_child)
		htb_parent_to_leaf(cl, new_q);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}

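/* Create a new class or change the parameters of an existing one. When
   a new leaf is attached under a class that was itself a leaf, the
   parent's qdisc is destroyed and the parent is promoted to an inner
   node. */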
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct rtattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct rtattr *opt = tca[TCA_OPTIONS - 1];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct rtattr *tb[TCA_HTB_RTAB];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
	    tb[TCA_HTB_PARMS - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
	if (!rtab || !ctab)
		goto failure;

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct rtattr		rta;
			struct gnet_estimator	opt;
		} est = {
			.rta = {
				.rta_len	= RTA_LENGTH(sizeof(est.opt)),
				.rta_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle)
		    || htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev->queue_lock,
				  tca[TCA_RATE-1] ? : &est.rta);
		cl->refcnt = 1;
		INIT_LIST_HEAD(&cl->sibling);
		INIT_HLIST_NODE(&cl->hlist);
		INIT_LIST_HEAD(&cl->children);
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create the leaf qdisc early because it uses kmalloc(GFP_KERNEL),
		   which can't be called inside sch_tree_lock
		   -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* the leaf (this class) needs an elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
		cl->t_c = psched_get_time();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
		list_add_tail(&cl->sibling,
			      parent ? &parent->children : &q->root);
	} else {
		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      &sch->dev->queue_lock,
					      tca[TCA_RATE-1]);
		sch_tree_lock(sch);
	}

	/* there used to be a nasty bug here: we have to check that the node
	   is really a leaf before changing cl->un.leaf! */
	if (!cl->level) {
		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 1000;
		}
		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 200000;
		}
		if (hopt->quantum)
			cl->un.leaf.quantum = hopt->quantum;
		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;

		/* backup for htb_parent_to_leaf */
		cl->quantum = cl->un.leaf.quantum;
		cl->prio = cl->un.leaf.prio;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}

static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least the tc_index filter uses this just to get the
	   class for other reasons, so we have to allow it.
	   ----
	   19.6.2002 As Werner explained, it is OK - bind_filter is just
	   another way to "lock" the class - unlike "get" this lock can
	   be broken by the class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	else
		q->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
	else
		q->filter_cnt--;
}

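/* Walk all classes in the hash table, honouring the walker's skip/count
   bookkeeping so that an interrupted dump can be resumed. */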
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

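/* Hooks that wire HTB's class management into the tc framework. */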
static struct Qdisc_class_ops htb_class_ops = {
	.graft = htb_graft,
	.leaf = htb_leaf,
	.qlen_notify = htb_qlen_notify,
	.get = htb_get,
	.put = htb_put,
	.change = htb_change_class,
	.delete = htb_delete,
	.walk = htb_walk,
	.tcf_chain = htb_find_tcf,
	.bind_tcf = htb_bind_filter,
	.unbind_tcf = htb_unbind_filter,
	.dump = htb_dump_class,
	.dump_stats = htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops = {
	.next = NULL,
	.cl_ops = &htb_class_ops,
	.id = "htb",
	.priv_size = sizeof(struct htb_sched),
	.enqueue = htb_enqueue,
	.dequeue = htb_dequeue,
	.requeue = htb_requeue,
	.drop = htb_drop,
	.init = htb_init,
	.reset = htb_reset,
	.destroy = htb_destroy,
	.change = NULL /* htb_change */,
	.dump = htb_dump,
	.owner = THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");