#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/config.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

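/*
 * Rate table shared between qdiscs configured with the same
 * tc_ratespec.  data[] is supplied by user space (tc) and maps a
 * packet size slot to its transmission time at the configured rate;
 * tables are reference counted and chained through ->next.
 */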
struct qdisc_rate_table
{
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

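/*
 * One qdisc instance.  ->enqueue and ->dequeue mirror the ops vector
 * for fast access on the hot path; ->q is a built-in packet queue
 * that the inline helpers at the bottom of this file operate on by
 * default.
 */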
struct Qdisc
{
	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN	1
#define TCQ_F_THROTTLED	2
#define TCQ_F_INGRESS	4
	int			padded;
	struct Qdisc_ops	*ops;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct sk_buff_head	q;
	struct net_device	*dev;
	struct list_head	list;

	struct gnet_stats_basic	bstats;
	struct gnet_stats_queue	qstats;
	struct gnet_stats_rate_est	rate_est;
	spinlock_t		*stats_lock;
	struct rcu_head		q_rcu;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
};

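/*
 * Per-class operations of a classful qdisc.  Classes are identified
 * by opaque unsigned long handles returned from ->get() and released
 * through ->put().
 */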
struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct rtattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

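/*
 * A qdisc implementation, registered under ->id.  ->priv_size bytes
 * of private data are allocated immediately behind struct Qdisc for
 * each instance.
 */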
struct Qdisc_ops
{
	struct Qdisc_ops	*next;
	struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	int			(*requeue)(struct sk_buff *, struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct rtattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct rtattr *arg);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

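/* Result of a classifier match: the resolved class and its classid. */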
struct tcf_result
{
	unsigned long	class;
	u32		classid;
};

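/*
 * A classifier (filter) implementation, registered under ->kind
 * (e.g. "u32", "fw").  As with qdisc classes, individual filter
 * elements are referenced through opaque unsigned longs.
 */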
struct tcf_proto_ops
{
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *, unsigned long);
	int			(*change)(struct tcf_proto *, unsigned long,
					u32 handle, struct rtattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

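/*
 * One filter attached to a qdisc.  Filters on a chain are ordered by
 * ->prio; ->classify duplicates ops->classify for fast access on the
 * hot path.
 */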
struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	u32			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

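/*
 * Illustrative sketch only (not part of this header): how a classful
 * qdisc might walk its filter chain on enqueue.  "filter_list",
 * "struct my_sched_data" and my_find_class() are hypothetical;
 * qdisc_priv() comes from net/pkt_sched.h.
 *
 *	static struct my_class *my_classify(struct sk_buff *skb,
 *					    struct Qdisc *sch)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *		struct tcf_result res;
 *		struct tcf_proto *tp;
 *
 *		for (tp = q->filter_list; tp; tp = tp->next) {
 *			if (tp->protocol != skb->protocol &&
 *			    tp->protocol != htons(ETH_P_ALL))
 *				continue;
 *			if (tp->classify(skb, tp, &res) >= 0)
 *				return my_find_class(q, res.classid);
 *		}
 *		return NULL;
 *	}
 */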
extern void qdisc_lock_tree(struct net_device *dev);
extern void qdisc_unlock_tree(struct net_device *dev);

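/*
 * The tree lock protects all modifications to a device's qdisc
 * hierarchy; the macros below merely resolve a qdisc or filter to the
 * device that owns it.
 */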
#define sch_tree_lock(q)	qdisc_lock_tree((q)->dev)
#define sch_tree_unlock(q)	qdisc_unlock_tree((q)->dev)
#define tcf_tree_lock(tp)	qdisc_lock_tree((tp)->q->dev)
#define tcf_tree_unlock(tp)	qdisc_unlock_tree((tp)->q->dev)

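/*
 * Final teardown of a filter: let the classifier free its private
 * state, drop the module reference taken when the filter was created,
 * and free the tcf_proto itself.  The caller is expected to have
 * already unlinked it from its chain.
 */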
static inline void
tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree(tp);
}

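/*
 * Inline queue management helpers.  The __qdisc_* variants work on a
 * caller-supplied sk_buff_head while keeping the byte backlog in
 * sch->qstats consistent; the plain variants apply them to the
 * built-in sch->q.
 */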
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += skb->len;
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
	__skb_queue_head(list, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_requeue(skb, sch, &sch->q);
}
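/*
 * Illustrative sketch only (not part of this header): a byte-limited
 * FIFO enqueue built entirely from these helpers.  "struct
 * my_fifo_data" and its "limit" member are assumed fields of the
 * private data; qdisc_priv() comes from net/pkt_sched.h.
 *
 *	static int my_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		struct my_fifo_data *q = qdisc_priv(sch);
 *
 *		if (likely(sch->qstats.backlog + skb->len <= q->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *
 *		return qdisc_reshape_fail(skb, sch);
 *	}
 */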
static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list;
	 * it is up to the caller to correct it.
	 */
	skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = skb->len;
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

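/*
 * Drop path for qdiscs that enforce a limit on enqueue.  With
 * CONFIG_NET_CLS_POLICE, a parent (e.g. CBQ) may install
 * ->reshape_fail to reclaim the packet instead of losing it; when the
 * callback returns zero, the enqueue is reported as successful.
 */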
static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_POLICE
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

#endif