#ifndef __NET_PKT_ACT_H
#define __NET_PKT_ACT_H

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#define tca_st(val) (struct tcf_##val *)
#define PRIV(a,name) (tca_st(name) (a)->priv)

#if 0 /* control */
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif

#if 0 /* data */
#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define D2PRINTK(format,args...)
#endif

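/* Map an action index to a bucket of the module's action hash table. */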
static __inline__ unsigned
tcf_hash(u32 index)
{
	return index & MY_TAB_MASK;
}

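/* Unlink @p from the action hash table, remove its rate estimator
 * (when CONFIG_NET_ESTIMATOR is set) and free the entry.
 */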
/* probably move this from being inline
 * and put into act_generic
 */
static inline void
tcf_hash_destroy(struct tcf_st *p)
{
	unsigned h = tcf_hash(p->index);
	struct tcf_st **p1p;

	for (p1p = &tcf_ht[h]; *p1p; p1p = &(*p1p)->next) {
		if (*p1p == p) {
			write_lock_bh(&tcf_t_lock);
			*p1p = p->next;
			write_unlock_bh(&tcf_t_lock);
#ifdef CONFIG_NET_ESTIMATOR
			gen_kill_estimator(&p->bstats, &p->rate_est);
#endif
			kfree(p);
			return;
		}
	}
	BUG_TRAP(0);
}

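/* Drop one reference (and one binding when @bind is set) on @p.
 * Destroys the action and returns 1 once both counts reach zero,
 * otherwise returns 0.
 */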
static inline int
tcf_hash_release(struct tcf_st *p, int bind)
{
	int ret = 0;
	if (p) {
		if (bind) {
			p->bindcnt--;
		}
		p->refcnt--;
		if (p->bindcnt <= 0 && p->refcnt <= 0) {
			tcf_hash_destroy(p);
			ret = 1;
		}
	}
	return ret;
}

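/* Dump up to TCA_ACT_MAX_PRIO actions from the hash table into @skb
 * as nested attributes, resuming at the offset saved in cb->args[0].
 * Returns the number of actions dumped.
 */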
static __inline__ int
tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
		struct tc_action *a)
{
	struct tcf_st *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct rtattr *r;

	read_lock(&tcf_t_lock);

	s_i = cb->args[0];

	for (i = 0; i < MY_TAB_SIZE; i++) {
		p = tcf_ht[tcf_hash(i)];

		for (; p; p = p->next) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = n_i;
			r = (struct rtattr *) skb->tail;
			RTA_PUT(skb, a->order, 0, NULL);
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				skb_trim(skb, (u8 *)r - skb->data);
				goto done;
			}
			r->rta_len = skb->tail - (u8 *)r;
			n_i++;
			if (n_i >= TCA_ACT_MAX_PRIO) {
				goto done;
			}
		}
	}
done:
	read_unlock(&tcf_t_lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

rtattr_failure:
	skb_trim(skb, (u8 *)r - skb->data);
	goto done;
}

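/* Release every action in the hash table and report the count of
 * walked entries in a TCA_FCNT attribute appended to @skb.
 */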
static __inline__ int
tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
{
	struct tcf_st *p, *s_p;
	struct rtattr *r;
	int i = 0, n_i = 0;

	r = (struct rtattr *) skb->tail;
	RTA_PUT(skb, a->order, 0, NULL);
	RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
	for (i = 0; i < MY_TAB_SIZE; i++) {
		p = tcf_ht[tcf_hash(i)];

		while (p != NULL) {
			s_p = p->next;
			if (ACT_P_DELETED == tcf_hash_release(p, 0)) {
				module_put(a->ops->owner);
			}
			n_i++;
			p = s_p;
		}
	}
	RTA_PUT(skb, TCA_FCNT, 4, &n_i);
	r->rta_len = skb->tail - (u8 *)r;

	return n_i;
rtattr_failure:
	skb_trim(skb, (u8 *)r - skb->data);
	return -EINVAL;
}

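/* Dispatch a netlink walk request: flush all actions for RTM_DELACTION,
 * dump them for RTM_GETACTION.
 */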
static __inline__ int
tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, int type,
		struct tc_action *a)
{
	if (type == RTM_DELACTION) {
		return tcf_del_walker(skb, a);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(skb, cb, a);
	} else {
		printk("tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}

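/* Find an action by its index.  Returns the entry or NULL. */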
static __inline__ struct tcf_st *
tcf_hash_lookup(u32 index)
{
	struct tcf_st *p;

	read_lock(&tcf_t_lock);
	for (p = tcf_ht[tcf_hash(index)]; p; p = p->next) {
		if (p->index == index)
			break;
	}
	read_unlock(&tcf_t_lock);
	return p;
}

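/* Generate a fresh, currently unused action index; 0 is never handed out. */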
static __inline__ u32
tcf_hash_new_index(void)
{
	do {
		if (++idx_gen == 0)
			idx_gen = 1;
	} while (tcf_hash_lookup(idx_gen));

	return idx_gen;
}

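/* Look up the action with the given index and attach it to @a->priv.
 * Returns 1 if found, 0 otherwise.
 */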
static inline int
tcf_hash_search(struct tc_action *a, u32 index)
{
	struct tcf_st *p = tcf_hash_lookup(index);

	if (p != NULL) {
		a->priv = p;
		return 1;
	}
	return 0;
}

#ifdef CONFIG_NET_ACT_INIT
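/* If an action with @index already exists, attach it to @a and, when
 * @bind is set, take a binding and a reference on it.  Returns the
 * existing entry or NULL.
 */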
static inline struct tcf_st *
tcf_hash_check(u32 index, struct tc_action *a, int ovr, int bind)
{
	struct tcf_st *p = NULL;
	if (index && (p = tcf_hash_lookup(index)) != NULL) {
		if (bind) {
			p->bindcnt++;
			p->refcnt++;
		}
		a->priv = p;
	}
	return p;
}

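/* Allocate and initialise a new action entry of @size bytes: set up the
 * refcount, lock, index, timestamps and (optionally) a rate estimator,
 * then attach it to @a.  The entry is not yet linked into the hash
 * table; see tcf_hash_insert().
 */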
static inline struct tcf_st *
tcf_hash_create(u32 index, struct rtattr *est, struct tc_action *a, int size, int ovr, int bind)
{
	struct tcf_st *p = NULL;

	p = kmalloc(size, GFP_KERNEL);
	if (p == NULL)
		return p;

	memset(p, 0, size);
	p->refcnt = 1;

	if (bind) {
		p->bindcnt = 1;
	}

	spin_lock_init(&p->lock);
	p->stats_lock = &p->lock;
	p->index = index ? : tcf_hash_new_index();
	p->tm.install = jiffies;
	p->tm.lastuse = jiffies;
#ifdef CONFIG_NET_ESTIMATOR
	if (est)
		gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
#endif
	a->priv = (void *) p;
	return p;
}

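/* Link a newly created action into the hash table. */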
static inline void tcf_hash_insert(struct tcf_st *p)
{
	unsigned h = tcf_hash(p->index);

	write_lock_bh(&tcf_t_lock);
	p->next = tcf_ht[h];
	tcf_ht[h] = p;
	write_unlock_bh(&tcf_t_lock);
}

#endif /* CONFIG_NET_ACT_INIT */

#endif /* __NET_PKT_ACT_H */