/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
   1. For now we assume that route tags < 256.
      This allows direct table lookups instead of hash tables.
   2. For now we assume that "from TAG" and "fromdev DEV" statements
      are mutually exclusive.
   3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
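
/*
 * Illustrative configuration (the device, addresses and class IDs below
 * are made up; only the keywords reflect this classifier): the routing
 * table assigns realms to routes, and the classifier then keys on
 * skb_dst(skb)->tclassid, whose low 16 bits carry the destination ("to")
 * realm and whose upper 16 bits carry the source ("from") realm:
 *
 *	ip route add 10.0.0.0/8 via 192.168.0.1 realm 2
 *	tc filter add dev eth0 parent 1: protocol ip prio 100 \
 *		route to 2 classid 1:2
 */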

struct route4_fastmap
{
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256 + 1];
};

struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16 + 16 + 1];
};

struct route4_filter
{
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

static const struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

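
/*
 * Wipe the fastmap cache under the qdisc root lock.  Called from
 * route4_delete() and route4_change() after the filter set changes, so
 * that stale cached matches (including ROUTE4_FAILURE entries) are not
 * served to the fast path.
 */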
static inline
void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
	spinlock_t *root_lock = qdisc_root_sleeping_lock(q);

	spin_lock_bh(root_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(root_lock);
}

static inline void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}

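
/*
 * Deliver a match: copy the filter's result, run its extensions (actions,
 * policers) if any, and on success cache the match in the fastmap unless
 * an extension vetoed caching (dont_cache).  A negative verdict from
 * tcf_exts_exec() means "keep looking": the surrounding loop continues
 * and the eventual result is not cached.
 */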
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}

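
/*
 * Lookup order: the fastmap cache first, then the bucket selected by the
 * destination realm of dst->tclassid.  Within a bucket the source-realm
 * chain, the incoming-interface chain and the wildcard chain are tried in
 * that order.  If the specific bucket yields nothing, bucket 256 (the
 * "to ANY" bucket) is searched the same way, and a complete miss is
 * cached as ROUTE4_FAILURE.  Without a configured head, the tclassid is
 * interpreted directly as a class handle (the "old method").
 */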
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb_dst(skb)) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable *)dst)->fl.iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();

	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id ^ tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}

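
/*
 * Filter handles pack the match into 32 bits: the low 16 bits hold the
 * destination realm ("to", 0-255), or 0x8000 when no "to" was given; the
 * high 16 bits hold the source realm ("from", 0-255), the interface index
 * with 0x8000 set ("fromif"), or 0xFFFF when neither was given.
 * to_hash() maps the low half to one of 257 buckets (the realm itself, or
 * 256 for the wildcard); from_hash() maps the high half to one of the 33
 * chains inside a bucket (0-15 from, 16-31 fromif, 32 wildcard).  E.g. a
 * "to 3 from 2" filter gets handle 0x00020003: bucket 3, chain 2.
 */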
static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;
	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}

static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}

static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
	return 0;
}

static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}

static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = tp->root;
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}

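
/*
 * Unlink a single filter.  The fastmap is flushed (it may still point at
 * the dying filter), and if the bucket ends up empty its slot in the head
 * table is cleared and the bucket itself is freed.
 */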
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter *)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i = 0; i <= 32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};

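
/*
 * Validate the attributes and build the new handle from TCA_ROUTE4_TO,
 * _FROM and _IIF, then hook the filter up to its bucket, allocating the
 * bucket on demand.  TCA_ROUTE4_FROM and TCA_ROUTE4_IIF are mutually
 * exclusive, matching assumption 2 in the comment at the top of the file.
 */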
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct nlattr **tb, struct nlattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

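
/*
 * Create or update a filter.  Existing filters are updated in place via
 * route4_set_parms(); otherwise a new route4_filter (and, on first use, a
 * route4_head) is allocated.  The filter is then inserted into its chain
 * in handle order, removed from the old chain if its handle changed, and
 * the fastmap is flushed.
 */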
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
	if (err < 0)
		return err;

	if ((f = (struct route4_filter *)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE], 1);
	if (err < 0)
		goto errout;

reinsert:
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

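
/*
 * Dump one filter to userspace: TCA_ROUTE4_TO, _FROM and _IIF are
 * reconstructed from f->handle, f->id and f->iif, followed by the class
 * ID and the attached extensions.
 */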
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter *)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF)
			NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
	} else {
		id = f->id >> 16;
		NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
	}
	if (f->res.classid)
		NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");