#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

/*
 * The following bits are only changed while the qdisc lock is held.
 */
enum qdisc___state_t {
	__QDISC___STATE_RUNNING,
};

struct qdisc_size_table {
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};
43
Eric Dumazetfd2c3ef2009-11-03 03:26:03 +000044struct Qdisc {
Linus Torvalds1da177e2005-04-16 15:20:36 -070045 int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
46 struct sk_buff * (*dequeue)(struct Qdisc *dev);
47 unsigned flags;
Jarek Poplawskib00355d2009-02-01 01:12:42 -080048#define TCQ_F_BUILTIN 1
49#define TCQ_F_THROTTLED 2
50#define TCQ_F_INGRESS 4
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +000051#define TCQ_F_CAN_BYPASS 8
Patrick McHardy23bcf632009-09-09 18:11:23 -070052#define TCQ_F_MQROOT 16
Jarek Poplawskib00355d2009-02-01 01:12:42 -080053#define TCQ_F_WARN_NONWC (1 << 16)
Linus Torvalds1da177e2005-04-16 15:20:36 -070054 int padded;
55 struct Qdisc_ops *ops;
Jussi Kivilinna175f9c12008-07-20 00:08:47 -070056 struct qdisc_size_table *stab;
Eric Dumazet5e140df2009-03-20 01:33:32 -070057 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -070058 u32 handle;
59 u32 parent;
60 atomic_t refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -070061 struct gnet_stats_rate_est rate_est;
Linus Torvalds1da177e2005-04-16 15:20:36 -070062 int (*reshape_fail)(struct sk_buff *skb,
63 struct Qdisc *q);
64
David S. Miller72b25a92008-07-18 20:54:17 -070065 void *u32_node;
66
Linus Torvalds1da177e2005-04-16 15:20:36 -070067 /* This field is deprecated, but it is still used by CBQ
68 * and it will live until better solution will be invented.
69 */
70 struct Qdisc *__parent;
Eric Dumazet5e140df2009-03-20 01:33:32 -070071 struct netdev_queue *dev_queue;
72 struct Qdisc *next_sched;
73
74 struct sk_buff *gso_skb;
75 /*
76 * For performance sake on SMP, we put highly modified fields at the end
77 */
78 unsigned long state;
79 struct sk_buff_head q;
Eric Dumazetc1a8f1f2009-08-16 09:36:49 +000080 struct gnet_stats_basic_packed bstats;
Eric Dumazet37112102010-06-02 03:24:13 -070081 unsigned long __state;
Eric Dumazet5e140df2009-03-20 01:33:32 -070082 struct gnet_stats_queue qstats;
Eric Dumazet79640a42010-06-02 05:09:29 -070083 struct rcu_head rcu_head;
84 spinlock_t busylock;
Linus Torvalds1da177e2005-04-16 15:20:36 -070085};

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	__clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
}
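
/*
 * Illustrative note (the real callers live outside this header, e.g.
 * qdisc_run() in net/pkt_sched.h and __qdisc_run() in
 * net/sched/sch_generic.c): transmit paths bracket the dequeue/xmit loop
 * with these helpers so that only one CPU runs a given qdisc at a time,
 * roughly:
 *
 *	if (qdisc_run_begin(q))
 *		__qdisc_run(q);		which calls qdisc_run_end(q) when done
 *
 * qdisc->__state is only modified with the qdisc lock held, which is why
 * the non-atomic __test_and_set_bit()/__clear_bit() variants suffice.
 */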

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};
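
/*
 * Minimal sketch of how a queueing discipline is normally wired up
 * (assumption: the register_qdisc()/unregister_qdisc() interface
 * implemented in net/sched/sch_api.c; every example_* name below is
 * hypothetical):
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.priv_size	= sizeof(struct example_sched_data),
 *		.enqueue	= example_enqueue,
 *		.dequeue	= example_dequeue,
 *		.peek		= qdisc_peek_dequeued,
 *		.drop		= example_drop,
 *		.init		= example_init,
 *		.reset		= example_reset,
 *		.change		= example_change,
 *		.dump		= example_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * The module's init routine would then call register_qdisc(&example_qdisc_ops)
 * and its exit routine unregister_qdisc(&example_qdisc_ops).
 */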

struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto*);

	unsigned long		(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto*, unsigned long);
	int			(*change)(struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto*, unsigned long);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto*, unsigned long,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};
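
/*
 * Minimal sketch of how a classifier is normally hooked up (assumption:
 * the register_tcf_proto_ops() interface implemented in
 * net/sched/cls_api.c; every example_* name below is hypothetical):
 *
 *	static struct tcf_proto_ops example_cls_ops __read_mostly = {
 *		.kind		= "example",
 *		.classify	= example_classify,
 *		.init		= example_init,
 *		.destroy	= example_destroy,
 *		.get		= example_get,
 *		.put		= example_put,
 *		.change		= example_change,
 *		.delete		= example_delete,
 *		.walk		= example_walk,
 *		.dump		= example_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * ->classify() fills in the struct tcf_result on a match and returns a
 * classification verdict to the caller.
 */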

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	char			data[];
};

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
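
/*
 * Sketch of typical usage (illustrative only, not a definition in this
 * header): a qdisc's ->change() handler takes the tree lock while it
 * swaps state that the packet-processing path may be reading:
 *
 *	sch_tree_lock(sch);
 *	... update limits, graft a new child, fix up sch->q.qlen ...
 *	sch_tree_unlock(sch);
 */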

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
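
/*
 * Illustrative sketch: a classful qdisc typically embeds struct
 * Qdisc_class_common at the start of its per-class structure and resolves
 * a classid in its ->get()/->change() handlers via qdisc_class_find()
 * (assumption: qdisc_priv() from net/pkt_sched.h; the example_* names and
 * the clhash member are hypothetical):
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	static struct example_class *example_find(struct Qdisc *sch, u32 classid)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, classid);
 *		return clc ? container_of(clc, struct example_class, common) : NULL;
 *	}
 */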

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void dev_deactivate_many(struct list_head *head);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				    struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device, starting from the given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}
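
/*
 * Illustrative sketch (not a definition in this header): a classful qdisc
 * enqueuing into a child qdisc typically propagates the extended return
 * codes like this:
 *
 *	ret = qdisc_enqueue(skb, child);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			sch->qstats.drops++;
 *		return ret;
 *	}
 *	sch->q.qlen++;
 *	return NET_XMIT_SUCCESS;
 *
 * The __NET_XMIT_* bits are masked off by NET_XMIT_MASK before a verdict
 * is handed back to the core network stack, see qdisc_enqueue_root() below.
 */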

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		sch->qstats.backlog -= len;
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
	return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
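
/*
 * Illustrative sketch of the peek/dequeue pairing used by shaping qdiscs:
 * the dequeue path peeks at the head packet, checks whether it may be sent
 * yet, and only commits to removing it once it will definitely be handed
 * out; returning NULL leaves the packet queued in the child. Every
 * example_* name and the token check below are hypothetical, and
 * qdisc_priv() is assumed from net/pkt_sched.h:
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *		struct sk_buff *skb = q->qdisc->ops->peek(q->qdisc);
 *
 *		if (skb == NULL || !example_tokens_available(q, qdisc_pkt_len(skb)))
 *			return NULL;
 *
 *		skb = qdisc_dequeue_peeked(q->qdisc);
 *		if (skb)
 *			sch->q.qlen--;
 *		return skb;
 *	}
 */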

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list;
	 * it is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
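
/*
 * Worked example with illustrative numbers: with cell_log = 3 and zero
 * cell_align/overhead, a 1000 byte packet indexes slot 1000 >> 3 = 125,
 * so qdisc_l2t() returns rtab->data[125], the precomputed transmit time
 * for that size class. Sizes whose slot exceeds 255 are approximated from
 * the table as data[255] * (slot >> 8) + data[slot & 0xFF].
 */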

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
					    int action)
{
	struct sk_buff *n;

	n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif