/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */

struct route4_fastmap
{
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

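/*
 * Classifier state: a 16-entry cache of recent lookups (fastmap) plus
 * one bucket per destination realm (0..255) and a final bucket (256)
 * for filters that do not match on a "to" realm.
 */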
struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256+1];
};

struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};

struct route4_filter
{
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};

#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))

static struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};

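/*
 * The fastmap caches recent lookup results, keyed by the low four bits
 * of the route tag; ROUTE4_FAILURE entries cache misses.  It is flushed
 * under dev->queue_lock whenever the filter set changes.
 */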
static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return id&0xF;
}

static inline
void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	spin_lock_bh(&dev->queue_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&dev->queue_lock);
}

static inline void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}

static __inline__ int route4_hash_to(u32 id)
{
	return id&0xFF;
}

static __inline__ int route4_hash_from(u32 id)
{
	return (id>>16)&0xF;
}

static __inline__ int route4_hash_iif(int iif)
{
	return 16 + ((iif>>16)&0xF);
}

static __inline__ int route4_hash_wild(void)
{
	return 32;
}

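/*
 * Apply a matching filter: copy its result, run any attached extensions
 * (actions/policers), and on plain success cache the match in the
 * fastmap.  A negative extension verdict skips this filter and disables
 * caching for the rest of the lookup.
 */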
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}

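/*
 * Classify by routing information: try the fastmap cache first, then
 * the bucket for the destination realm, matching on source realm,
 * input interface and finally the wildcard chain, then repeat against
 * the "to ANY" bucket (256).  Misses are cached as ROUTE4_FAILURE.
 * When no filters are installed yet, the route tag is interpreted
 * directly as a class handle of this qdisc.
 */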
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();

	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}

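/*
 * Filter handles pack the match key:
 *   bits  0.. 7	destination realm ("to")
 *   bit      15	set when no "to" realm is given (bucket 256)
 *   bits 16..31	source realm ("from"), the input interface with
 *			bit 31 set, or 0xFFFF when neither is specified
 *
 * to_hash() selects the bucket in head->table, from_hash() selects the
 * chain within the bucket.
 */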
static inline u32 to_hash(u32 id)
{
	u32 h = id&0xFF;
	if (id&0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id&0xF;
	}
	return 16 + (id&0xF);
}

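/*
 * Look up a filter by its handle, walking the bucket and chain encoded
 * in the handle.  Returns the filter as an opaque reference, or 0.
 */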
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle>>16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}

static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
	return 0;
}

static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}

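/* Tear down the whole classifier: every bucket, chain and filter. */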
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}

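/*
 * Delete a single filter.  The fastmap is flushed so stale entries
 * cannot point at the freed filter, and the bucket itself is released
 * once its last chain becomes empty.
 */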
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}

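/*
 * Validate the netlink attributes, compute the new handle from the
 * TO/FROM/IIF selectors, allocate the target bucket if needed, and
 * install the parameters into the filter.
 */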
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct rtattr **tb, struct rtattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_CLASSID-1])
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
			goto errout;

	if (tb[TCA_ROUTE4_TO-1]) {
		if (new && handle & 0x8000)
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
			goto errout;
		to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM-1]) {
		if (tb[TCA_ROUTE4_IIF-1])
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO-1])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM-1])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF-1])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID-1]) {
		f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

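/*
 * Create a new filter or update an existing one.  On success the
 * filter is (re)inserted into its chain in handle order and the
 * fastmap is flushed.
 */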
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct rtattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_ROUTE4_MAX];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
		return -EINVAL;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE-1], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE-1], 1);
	if (err < 0)
		goto errout;

reinsert:
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}

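/* Iterate over every installed filter, honouring skip/count/stop. */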
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

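/*
 * Dump a filter back to user space: decode the TO/FROM/IIF selectors
 * from the handle and emit them together with classid and extensions.
 */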
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id>>16;
		RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	rta->rta_len = skb->tail - b;

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static struct tcf_proto_ops cls_route4_ops = {
	.next		=	NULL,
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");