/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
   1. For now we assume that route tags < 256.
      This allows direct table lookups instead of hash tables.
   2. For now we assume that "from TAG" and "fromdev DEV" statements
      are mutually exclusive.
   3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
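/*
 * Rough layout of the data structures below: route4_head has one
 * bucket per "to" realm (0..255) plus bucket 256 for filters whose
 * "to" is wildcarded; each bucket has 16 FROM chains, 16 IIF chains
 * and one wildcard chain.  The fastmap is a small direct-mapped
 * cache of recent classification results.
 */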

struct route4_fastmap
{
	struct route4_filter *filter;
	u32 id;
	int iif;
};

struct route4_head
{
	struct route4_fastmap fastmap[16];
	struct route4_bucket *table[256+1];
};

struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter *ht[16+16+1];
};

struct route4_filter
{
	struct route4_filter *next;
	u32 id;
	int iif;

	struct tcf_result res;
	struct tcf_exts exts;
	u32 handle;
	struct route4_bucket *bkt;
};

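/*
 * Sentinel stored in the fastmap to negatively cache lookups that
 * matched no filter; it is never dereferenced, only compared against.
 */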
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))

static struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};

static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return id&0xF;
}

static inline
void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	qdisc_lock_tree(dev);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	qdisc_unlock_tree(dev);
}

static inline void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}

static __inline__ int route4_hash_to(u32 id)
{
	return id&0xFF;
}

static __inline__ int route4_hash_from(u32 id)
{
	return (id>>16)&0xF;
}

static __inline__ int route4_hash_iif(int iif)
{
	return 16 + ((iif>>16)&0xF);
}

static __inline__ int route4_hash_wild(void)
{
	return 32;
}

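/*
 * ROUTE4_APPLY_RESULT() is expanded inside the lookup loops of
 * route4_classify().  On a match it copies the filter's result into
 * *res.  If actions are attached it runs them and returns their
 * verdict, continuing the scan (and disabling fastmap caching) on a
 * negative verdict.  Without actions the hit is cached in the
 * fastmap, unless caching was disabled, and 0 is returned.
 */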
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}

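/*
 * Lookup order in route4_classify(): the fastmap is consulted first;
 * on a miss the bucket selected by the "to" realm is scanned (FROM
 * chain, then IIF chain, then wildcard chain), followed by the
 * wildcard-"to" bucket (index 256) with the FROM bits masked out.
 * A complete miss is negatively cached with ROUTE4_FAILURE.  With no
 * filters installed, the old behaviour of interpreting dst->tclassid
 * directly as a class ID is kept.
 */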
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();

	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}

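/*
 * Filter handles encode the match in their bits: bits 0-7 carry the
 * "to" realm (bit 15 is set when "to" is wildcarded), and bits 16-31
 * carry either the "from" realm, the ingress interface with bit 31
 * set, or 0xFFFF when both are wildcarded.  to_hash() and from_hash()
 * map a handle onto the bucket and chain indices used above.
 */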
static inline u32 to_hash(u32 id)
{
	u32 h = id&0xFF;
	if (id&0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id&0xF;
	}
	return 16 + (id&0xF);
}

static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle>>16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}

static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
	return 0;
}

static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}

static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}

static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};

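/*
 * route4_set_parms() validates the TO/FROM/IIF attributes (FROM and
 * IIF are mutually exclusive), derives the new handle from them,
 * allocates the target bucket on demand and refuses a handle that is
 * already present in it, then commits id/iif/handle/bucket under the
 * tree lock and finally binds the class and attaches the extensions.
 */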
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct nlattr **tb, struct nlattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

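/*
 * route4_change() parses the nested options and either updates an
 * existing filter in place or allocates head/filter as needed, then
 * (re)inserts the filter into its chain in ascending handle order,
 * unlinks it from the old position if the handle changed, and resets
 * the fastmap so stale cached results are dropped.
 */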
static int route4_change(struct tcf_proto *tp, unsigned long base,
			 u32 handle,
			 struct nlattr **tca,
			 unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
	if (err < 0)
		return err;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE], 1);
	if (err < 0)
		goto errout;

reinsert:
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

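/*
 * route4_dump() reconstructs the TO/FROM/IIF attributes for user
 * space from the handle encoding described above: a clear bit 15
 * means a concrete "to" realm is dumped, bit 31 selects IIF over
 * FROM, and an all-ones upper half (wildcard source) dumps neither.
 */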
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
	}
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
	} else {
		id = f->id>>16;
		NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
	}
	if (f->res.classid)
		NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		= "route",
	.classify	= route4_classify,
	.init		= route4_init,
	.destroy	= route4_destroy,
	.get		= route4_get,
	.put		= route4_put,
	.change		= route4_change,
	.delete		= route4_delete,
	.walk		= route4_walk,
	.dump		= route4_dump,
	.owner		= THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");