#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_RUNNING,
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_THROTTLED		2
#define TCQ_F_INGRESS		4
#define TCQ_F_CAN_BYPASS	8
#define TCQ_F_MQROOT		16
#define TCQ_F_WARN_NONWC	(1 << 16)
	int			padded;
	struct Qdisc_ops	*ops;
	struct qdisc_size_table	*stab;
	struct list_head	list;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct gnet_stats_rate_est	rate_est;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
	struct netdev_queue	*dev_queue;
	struct Qdisc		*next_sched;

	struct sk_buff		*gso_skb;
	/*
	 * For performance's sake on SMP, we put the most frequently
	 * modified fields at the end.
	 */
	unsigned long		state;
	struct sk_buff_head	q;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue	qstats;
	struct rcu_head		rcu_head;
};

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};


struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *, unsigned long);
	int			(*change)(struct tcf_proto *, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	char			data[];
};

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}
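
/* Example (illustrative sketch only, not part of the original header):
 * a configuration path typically takes the root lock around its
 * updates, with the RTNL semaphore already held:
 *
 *	spinlock_t *root_lock = qdisc_root_lock(q);
 *
 *	spin_lock_bh(root_lock);
 *	... modify qdisc tree state ...
 *	spin_unlock_bh(root_lock);
 */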

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}
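
/* Worked example (illustrative, not part of the original header): with
 * a 16-slot table (mask 0xf) and classid 0x120, the folds give
 * 0x120 ^ 0x1 = 0x121, then 0x121 ^ 0x12 = 0x133, which masks to
 * slot 0x3.  Folding the upper bits down keeps sequentially allocated
 * classids from all landing in the same bucket.
 */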

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				    struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device at or above a given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
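
/* Example (illustrative sketch, not part of the original header): a
 * classful qdisc propagating a child's enqueue return value typically
 * counts a drop only when the packet was not stolen by an action:
 *
 *	ret = qdisc_enqueue(skb, child);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			sch->qstats.drops++;
 *		return ret;
 *	}
 */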

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}

static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
{
	sch->bstats.bytes += len;
	sch->bstats.packets++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_head(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
	return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
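
/* Example (illustrative sketch, not part of the original header): a
 * non-work-conserving qdisc such as a shaper pairs the two helpers,
 * peeking first and consuming the skb only once it may be sent:
 *
 *	skb = qdisc_peek_dequeued(sch);
 *	if (skb && tokens_available(sch, qdisc_pkt_len(skb)))
 *		skb = qdisc_dequeue_peeked(sch);
 *	else
 *		skb = NULL;
 *
 * tokens_available() is a hypothetical predicate standing in for the
 * qdisc's own admission test.
 */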

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return (rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF]);
	return rtab->data[slot];
}
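
/* Worked example (illustrative, not part of the original header): with
 * cell_log = 3 and no cell_align or overhead, a 1000-byte packet
 * indexes slot 1000 >> 3 = 125, so the transmission time is
 * rtab->data[125].  For slots above 255 the table is extrapolated:
 * data[255] is scaled by slot >> 8 and the remainder is looked up at
 * slot & 0xFF.
 */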

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif