#ifndef __NET_PKT_ACT_H
#define __NET_PKT_ACT_H

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

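/*
 * Generic helpers shared by the packet action modules (act_*).  The hash
 * table tcf_ht[], its lock tcf_t_lock, MY_TAB_SIZE/MY_TAB_MASK and the
 * index generator idx_gen are not defined here; they are expected to be
 * provided by the module that includes this header.  PRIV(a, name) casts
 * an action's private data to that module's struct tcf_<name> state.
 */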
#define tca_st(val) (struct tcf_##val *)
#define PRIV(a, name) (tca_st(name) (a)->priv)

#if 0 /* control */
#define DPRINTK(format, args...) printk(KERN_DEBUG format, ##args)
#else
#define DPRINTK(format, args...)
#endif

#if 0 /* data */
#define D2PRINTK(format, args...) printk(KERN_DEBUG format, ##args)
#else
#define D2PRINTK(format, args...)
#endif

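/* Map an action index to its bucket in the tcf_ht[] hash table. */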
static __inline__ unsigned
tcf_hash(u32 index)
{
	return index & MY_TAB_MASK;
}

/* Unlink @p from the hash table, drop its rate estimator (if configured)
 * and free it.
 *
 * XXX: probably move this from being inline and put into act_generic.
 */
static inline void
tcf_hash_destroy(struct tcf_st *p)
{
	unsigned h = tcf_hash(p->index);
	struct tcf_st **p1p;

	for (p1p = &tcf_ht[h]; *p1p; p1p = &(*p1p)->next) {
		if (*p1p == p) {
			write_lock_bh(&tcf_t_lock);
			*p1p = p->next;
			write_unlock_bh(&tcf_t_lock);
#ifdef CONFIG_NET_ESTIMATOR
			gen_kill_estimator(&p->bstats, &p->rate_est);
#endif
			kfree(p);
			return;
		}
	}
	BUG_TRAP(0);
}

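/* Drop one reference (and one binding when @bind is set) on @p and destroy
 * it once both counts reach zero.  Returns 1 if the entry was destroyed,
 * 0 otherwise.
 */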
static inline int
tcf_hash_release(struct tcf_st *p, int bind)
{
	int ret = 0;

	if (p) {
		if (bind)
			p->bindcnt--;
		p->refcnt--;
		if (p->bindcnt <= 0 && p->refcnt <= 0) {
			tcf_hash_destroy(p);
			ret = 1;
		}
	}
	return ret;
}

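/* RTM_GETACTION dump helper: resume the table walk at the offset saved in
 * cb->args[0] and emit one nested attribute per action, at most
 * TCA_ACT_MAX_PRIO entries per call.  Returns the number of actions dumped.
 */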
static __inline__ int
tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
		struct tc_action *a)
{
	struct tcf_st *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct rtattr *r;

	read_lock(&tcf_t_lock);

	s_i = cb->args[0];

	for (i = 0; i < MY_TAB_SIZE; i++) {
		p = tcf_ht[tcf_hash(i)];

		for (; p; p = p->next) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = n_i;
			r = (struct rtattr *)skb->tail;
			RTA_PUT(skb, a->order, 0, NULL);
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				skb_trim(skb, (u8 *)r - skb->data);
				goto done;
			}
			r->rta_len = skb->tail - (u8 *)r;
			n_i++;
			if (n_i >= TCA_ACT_MAX_PRIO)
				goto done;
		}
	}
done:
	read_unlock(&tcf_t_lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

rtattr_failure:
	skb_trim(skb, (u8 *)r - skb->data);
	goto done;
}

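/* RTM_DELACTION flush helper: release every action in the table and report
 * the number of flushed entries in a TCA_FCNT attribute.
 */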
static __inline__ int
tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
{
	struct tcf_st *p, *s_p;
	struct rtattr *r;
	int i = 0, n_i = 0;

	r = (struct rtattr *)skb->tail;
	RTA_PUT(skb, a->order, 0, NULL);
	RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
	for (i = 0; i < MY_TAB_SIZE; i++) {
		p = tcf_ht[tcf_hash(i)];

		while (p != NULL) {
			s_p = p->next;
			if (ACT_P_DELETED == tcf_hash_release(p, 0))
				module_put(a->ops->owner);
			n_i++;
			p = s_p;
		}
	}
	RTA_PUT(skb, TCA_FCNT, 4, &n_i);
	r->rta_len = skb->tail - (u8 *)r;

	return n_i;
rtattr_failure:
	skb_trim(skb, (u8 *)r - skb->data);
	return -EINVAL;
}

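/* Dispatch a netlink walk request to the delete or dump walker above. */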
static __inline__ int
tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, int type,
		   struct tc_action *a)
{
	if (type == RTM_DELACTION) {
		return tcf_del_walker(skb, a);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(skb, cb, a);
	} else {
		printk("tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}

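/* Look up an action by index.  Returns the entry, or NULL if none exists. */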
static __inline__ struct tcf_st *
tcf_hash_lookup(u32 index)
{
	struct tcf_st *p;

	read_lock(&tcf_t_lock);
	for (p = tcf_ht[tcf_hash(index)]; p; p = p->next) {
		if (p->index == index)
			break;
	}
	read_unlock(&tcf_t_lock);
	return p;
}

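/* Return the next unused action index, wrapping idx_gen and skipping 0. */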
static __inline__ u32
tcf_hash_new_index(void)
{
	do {
		if (++idx_gen == 0)
			idx_gen = 1;
	} while (tcf_hash_lookup(idx_gen));

	return idx_gen;
}

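/* Attach the action with the given index to @a, if it exists.  Returns 1 on
 * a hit, 0 otherwise.
 */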
static inline int
tcf_hash_search(struct tc_action *a, u32 index)
{
	struct tcf_st *p = tcf_hash_lookup(index);

	if (p != NULL) {
		a->priv = p;
		return 1;
	}
	return 0;
}

#ifdef CONFIG_NET_ACT_INIT
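/* If an action with @index already exists, take a reference (plus a binding
 * when @bind is set), attach it to @a and return it; otherwise return NULL.
 */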
static inline struct tcf_st *
tcf_hash_check(u32 index, struct tc_action *a, int ovr, int bind)
{
	struct tcf_st *p = NULL;

	if (index && (p = tcf_hash_lookup(index)) != NULL) {
		if (bind) {
			p->bindcnt++;
			p->refcnt++;
		}
		a->priv = p;
	}
	return p;
}

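/* Allocate and initialise a new action entry of @size bytes: set up the
 * reference and bind counts, the per-action lock, an index (@index, or a
 * freshly generated one), the timestamps and, when @est is supplied, a rate
 * estimator.  The entry is not linked into the table here; see
 * tcf_hash_insert() below.
 */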
static inline struct tcf_st *
tcf_hash_create(u32 index, struct rtattr *est, struct tc_action *a, int size,
		int ovr, int bind)
{
	struct tcf_st *p = kmalloc(size, GFP_KERNEL);

	if (p == NULL)
		return p;

	memset(p, 0, size);
	p->refcnt = 1;

	if (bind)
		p->bindcnt = 1;

	spin_lock_init(&p->lock);
	p->stats_lock = &p->lock;
	p->index = index ? : tcf_hash_new_index();
	p->tm.install = jiffies;
	p->tm.lastuse = jiffies;
#ifdef CONFIG_NET_ESTIMATOR
	if (est)
		gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
#endif
	a->priv = (void *)p;
	return p;
}

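/* Link a newly created action into the head of its hash bucket. */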
static inline void tcf_hash_insert(struct tcf_st *p)
{
	unsigned h = tcf_hash(p->index);

	write_lock_bh(&tcf_t_lock);
	p->next = tcf_ht[h];
	tcf_ht[h] = p;
	write_unlock_bh(&tcf_t_lock);
}

#endif /* CONFIG_NET_ACT_INIT */

#endif /* __NET_PKT_ACT_H */