blob: cc941d0ee3a5daf9c51065b9e3fce766d32ea8bd [file] [log] [blame]
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
11
12#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <asm/uaccess.h>
14#include <asm/system.h>
15#include <linux/bitops.h>
16#include <linux/types.h>
17#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/socket.h>
21#include <linux/sockios.h>
22#include <linux/in.h>
23#include <linux/errno.h>
24#include <linux/interrupt.h>
25#include <linux/if_ether.h>
26#include <linux/inet.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/notifier.h>
30#include <net/ip.h>
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -070031#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <net/route.h>
33#include <linux/skbuff.h>
34#include <net/sock.h>
35#include <net/act_api.h>
36#include <net/pkt_cls.h>
37
/*
   1. For now we assume that route tags < 256.
      It allows to use direct table lookups, instead of hash tables.
   2. For now we assume that "from TAG" and "fromdev DEV" statements
      are mutually exclusive.
   3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
 */
45
/* One entry of the lookup cache: remembers the filter that matched a
 * recent (route tag, input interface) pair.  filter may also be the
 * ROUTE4_FAILURE sentinel, caching a negative result. */
struct route4_fastmap
{
	struct route4_filter *filter;	/* cached match, or ROUTE4_FAILURE */
	u32 id;				/* dst->tclassid this entry caches */
	int iif;			/* input interface this entry caches */
};
52
/* Per-classifier root: a small cache of recent lookups plus 256
 * "to TAG" buckets and one extra bucket (index 256) for "to ANY". */
struct route4_head
{
	struct route4_fastmap fastmap[16];
	struct route4_bucket *table[256+1];
};
58
/* Filter chains for one "to" value; indexed by from_hash()/the
 * route4_hash_*() helpers. */
struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter *ht[16+16+1];
};
64
/* One installed filter; chained per bucket slot, kept in ascending
 * handle order by route4_change(). */
struct route4_filter
{
	struct route4_filter *next;
	u32 id;			/* "to" tag in low 16 bits, "from" tag above */
	int iif;		/* input interface for "fromdev" rules */

	struct tcf_result res;	/* classification result bound to this filter */
	struct tcf_exts exts;	/* actions / policing */
	u32 handle;		/* encodes to/from/iif, see to_hash()/from_hash() */
	struct route4_bucket *bkt;	/* bucket this filter lives in */
};
76
/* Sentinel stored in the fastmap to cache a failed lookup. */
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
78
/* Maps the generic tcf extension kinds to ROUTE4 netlink attributes. */
static struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};
83
84static __inline__ int route4_fastmap_hash(u32 id, int iif)
85{
86 return id&0xF;
87}
88
/* Invalidate the whole fastmap cache under the qdisc tree lock; called
 * after any filter add/change/delete.  @id is unused: the entire cache
 * is flushed, not just the affected slot. */
static inline
void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	qdisc_lock_tree(dev);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	qdisc_unlock_tree(dev);
}
96
Dave Jonesb6f99a22007-03-22 12:27:49 -070097static inline void
Linus Torvalds1da177e2005-04-16 15:20:36 -070098route4_set_fastmap(struct route4_head *head, u32 id, int iif,
99 struct route4_filter *f)
100{
101 int h = route4_fastmap_hash(id, iif);
102 head->fastmap[h].id = id;
103 head->fastmap[h].iif = iif;
104 head->fastmap[h].filter = f;
105}
106
/* head->table[] index for the "to" part of a route tag (0..255). */
static __inline__ int route4_hash_to(u32 id)
{
	return id&0xFF;
}
111
112static __inline__ int route4_hash_from(u32 id)
113{
114 return (id>>16)&0xF;
115}
116
/* Bucket slot (16..31) for a "fromdev DEV" input interface. */
static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}
121
/* Slot of the single wildcard chain at the end of a bucket. */
static inline int route4_hash_wild(void)
{
	return 32;
}
126
/*
 * Apply filter f to the current packet: copy its result into *res and
 * execute its extensions (actions/police).  A negative extension result
 * skips this filter ("continue" in the caller's loop) and disables
 * fastmap caching for this lookup; otherwise the fastmap is updated and
 * the macro returns from route4_classify().  Only meaningful inside the
 * lookup loops of route4_classify(), which provide skb, res, head, id,
 * iif and dont_cache.
 */
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
141
/*
 * Classify a packet by its routing decision: look up skb->dst->tclassid
 * (and the input interface for "fromdev" rules) in the filter tables,
 * consulting the fastmap cache first.  Returns 0 on a match with *res
 * filled in, a positive action code from tcf_exts_exec(), or -1 when
 * nothing matched.
 */
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	/* Fast path: recent (id, iif) pairs are cached in the fastmap. */
	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		/* Negative results are cached as ROUTE4_FAILURE. */
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		/* "to TAG from TAG" entries first ... */
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		/* ... then "fromdev DEV" entries ... */
		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		/* ... then the wildcard chain. */
		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();

	}
	/* Nothing in the "to TAG" bucket: retry the "to ANY" bucket
	 * (index 256) with the tag bits cleared. */
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	/* No filters installed: interpret tclassid directly as a class
	 * handle of this qdisc. */
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
208
209static inline u32 to_hash(u32 id)
210{
211 u32 h = id&0xFF;
212 if (id&0x8000)
213 h += 256;
214 return h;
215}
216
217static inline u32 from_hash(u32 id)
218{
219 id &= 0xFFFF;
220 if (id == 0xFFFF)
221 return 32;
222 if (!(id & 0x8000)) {
223 if (id > 255)
224 return 256;
225 return id&0xF;
226 }
227 return 16 + (id&0xF);
228}
229
/*
 * Look up a filter by its exact handle.  Returns the filter cast to an
 * opaque unsigned long, or 0 when absent or the handle malformed.
 * No reference is taken (see route4_put()).
 */
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)	/* 0x8000 flag combined with a nonzero tag */
		return 0;

	h2 = from_hash(handle>>16);
	if (h2 > 32)	/* out-of-range "from" part */
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}
255
/* Nothing to release: route4_get() takes no reference. */
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}
259
/* Per-instance state (the head) is allocated lazily in route4_change(). */
static int route4_init(struct tcf_proto *tp)
{
	return 0;
}
264
/* Free one filter: unbind its class, destroy its extensions, free it. */
static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
272
/*
 * Tear the classifier down.  The root is detached first (xchg), so a
 * concurrent route4_classify() sees a NULL head and falls back to the
 * old method; then every filter, every bucket and the head are freed.
 */
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}
298
/*
 * Unlink and free the filter referenced by @arg.  If its bucket is left
 * with no filters in any of the 33 chains, the bucket itself is removed
 * from the head and freed.  Returns -EINVAL when head or filter is
 * missing, 0 otherwise.
 */
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			/* Unlink under the tree lock, then flush the
			 * fastmap before freeing the filter. */
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
339
/*
 * Validate the netlink attributes and (re)configure filter @f.
 * Computes the canonical handle (nhandle) from the TO/FROM/IIF
 * attributes, allocates the destination bucket on demand and commits
 * id/iif/handle/classid and the validated extensions under the tree
 * lock.  @new distinguishes creation from change.  Returns 0 or a
 * negative errno; on error the validated extensions are destroyed.
 */
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct rtattr **tb, struct rtattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;	/* default: "to ANY" */
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_CLASSID-1])
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
			goto errout;

	if (tb[TCA_ROUTE4_TO-1]) {
		/* An explicit "to" contradicts the 0x8000 wildcard flag. */
		if (new && handle & 0x8000)
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
			goto errout;
		to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
		if (to > 0xFF)		/* route tags are assumed < 256 */
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM-1]) {
		/* "from TAG" and "fromdev DEV" are mutually exclusive. */
		if (tb[TCA_ROUTE4_IIF-1])
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
		if (id > 0x7FFF)
			goto errout;
		/* 0x8000 in the high word marks an iif-based handle. */
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;	/* wildcard "from" */

	if (handle && new) {
		/* A user-supplied handle must agree with the computed one. */
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		/* NOTE(review): for a new filter f->handle is still 0 at
		 * this point, so the duplicate check compares against 0
		 * rather than nhandle — verify intent before relying on
		 * -EEXIST here. */
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	/* Commit the new identity atomically w.r.t. classify. */
	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO-1])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM-1])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF-1])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID-1]) {
		f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
439
/*
 * Create a new filter or update an existing one (*arg != 0).  The root
 * head and the destination bucket are allocated on demand; after a
 * handle change the filter is inserted in handle order and unlinked
 * from its previous chain.  The fastmap cache is flushed on success and
 * *arg is set to the (possibly new) filter.
 */
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct rtattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_ROUTE4_MAX];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
		return -EINVAL;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		/* Change of an existing filter. */
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE-1], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		/* First filter: allocate the root lazily. */
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE-1], 1);
	if (err < 0)
		goto errout;

reinsert:
	/* Insert in ascending handle order within the bucket chain. */
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	/* If the handle changed, drop the filter from its old chain. */
	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);	/* kfree(NULL) is a no-op on early failures */
	return err;
}
527
/*
 * Iterate over every installed filter, honouring the walker's
 * skip/count bookkeeping; stops early when arg->fn() returns < 0.
 */
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
561
/*
 * Dump one filter into a netlink message: TO/FROM/IIF attributes as
 * decoded from the handle, the bound classid, and the extension
 * (action/police) attributes.  Returns skb->len on success, or -1
 * after trimming a partially written message.
 */
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	/* 0x8000 in the low word means "to ANY": no TO attribute. */
	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	if (f->handle&0x80000000) {
		/* iif-based handle; high word 0xFFFF is the wildcard. */
		if ((f->handle>>16) != 0xFFFF)
			RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id>>16;
		RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	/* Patch the nested attribute's length now that it is complete. */
	rta->rta_len = skb_tail_pointer(skb) - b;

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
606
/* Operations exported to the tc filter core for the "route" classifier. */
static struct tcf_proto_ops cls_route4_ops = {
	.next		=	NULL,
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
621
/* Register the "route" classifier with the packet classifier core. */
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}
626
/* Unregister the classifier on module unload. */
static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}
631
/* Module entry/exit hooks and license. */
module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");