blob: 694dcd85dec83bda586f8a96843a1f8fac6a4259 [file] [log] [blame]
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
11
12#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090013#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/types.h>
15#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/string.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/errno.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/skbuff.h>
Patrick McHardy0ba48052007-07-02 22:49:07 -070019#include <net/dst.h>
20#include <net/route.h>
21#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <net/act_api.h>
23#include <net/pkt_cls.h>
24
/*
 * 1. For now we assume that route tags < 256.
 *    It allows to use direct table lookups, instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
 */
32
33struct route4_fastmap
34{
35 struct route4_filter *filter;
36 u32 id;
37 int iif;
38};
39
40struct route4_head
41{
42 struct route4_fastmap fastmap[16];
43 struct route4_bucket *table[256+1];
44};
45
46struct route4_bucket
47{
48 /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
49 struct route4_filter *ht[16+16+1];
50};
51
52struct route4_filter
53{
54 struct route4_filter *next;
55 u32 id;
56 int iif;
57
58 struct tcf_result res;
59 struct tcf_exts exts;
60 u32 handle;
61 struct route4_bucket *bkt;
62};
63
64#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
65
Patrick McHardy52390082008-01-31 18:36:18 -080066static const struct tcf_ext_map route_ext_map = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070067 .police = TCA_ROUTE4_POLICE,
68 .action = TCA_ROUTE4_ACT
69};
70
71static __inline__ int route4_fastmap_hash(u32 id, int iif)
72{
73 return id&0xF;
74}
75
76static inline
David S. Miller15b458f2008-07-16 02:42:51 -070077void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070078{
Jarek Poplawski102396a2008-08-29 14:21:52 -070079 spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
David S. Miller15b458f2008-07-16 02:42:51 -070080
81 spin_lock_bh(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070082 memset(head->fastmap, 0, sizeof(head->fastmap));
David S. Miller15b458f2008-07-16 02:42:51 -070083 spin_unlock_bh(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070084}
85
Dave Jonesb6f99a22007-03-22 12:27:49 -070086static inline void
Linus Torvalds1da177e2005-04-16 15:20:36 -070087route4_set_fastmap(struct route4_head *head, u32 id, int iif,
88 struct route4_filter *f)
89{
90 int h = route4_fastmap_hash(id, iif);
91 head->fastmap[h].id = id;
92 head->fastmap[h].iif = iif;
93 head->fastmap[h].filter = f;
94}
95
96static __inline__ int route4_hash_to(u32 id)
97{
98 return id&0xFF;
99}
100
101static __inline__ int route4_hash_from(u32 id)
102{
103 return (id>>16)&0xF;
104}
105
106static __inline__ int route4_hash_iif(int iif)
107{
108 return 16 + ((iif>>16)&0xF);
109}
110
111static __inline__ int route4_hash_wild(void)
112{
113 return 32;
114}
115
116#define ROUTE4_APPLY_RESULT() \
117{ \
118 *res = f->res; \
119 if (tcf_exts_is_available(&f->exts)) { \
120 int r = tcf_exts_exec(skb, &f->exts, res); \
121 if (r < 0) { \
122 dont_cache = 1; \
123 continue; \
124 } \
125 return r; \
126 } else if (!dont_cache) \
127 route4_set_fastmap(head, id, iif, f); \
128 return 0; \
129}
130
131static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
132 struct tcf_result *res)
133{
134 struct route4_head *head = (struct route4_head*)tp->root;
135 struct dst_entry *dst;
136 struct route4_bucket *b;
137 struct route4_filter *f;
138 u32 id, h;
139 int iif, dont_cache = 0;
140
Eric Dumazetadf30902009-06-02 05:19:30 +0000141 if ((dst = skb_dst(skb)) == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142 goto failure;
143
144 id = dst->tclassid;
145 if (head == NULL)
146 goto old_method;
147
148 iif = ((struct rtable*)dst)->fl.iif;
149
150 h = route4_fastmap_hash(id, iif);
151 if (id == head->fastmap[h].id &&
152 iif == head->fastmap[h].iif &&
153 (f = head->fastmap[h].filter) != NULL) {
154 if (f == ROUTE4_FAILURE)
155 goto failure;
156
157 *res = f->res;
158 return 0;
159 }
160
161 h = route4_hash_to(id);
162
163restart:
164 if ((b = head->table[h]) != NULL) {
165 for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
166 if (f->id == id)
167 ROUTE4_APPLY_RESULT();
168
169 for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
170 if (f->iif == iif)
171 ROUTE4_APPLY_RESULT();
172
173 for (f = b->ht[route4_hash_wild()]; f; f = f->next)
174 ROUTE4_APPLY_RESULT();
175
176 }
177 if (h < 256) {
178 h = 256;
179 id &= ~0xFFFF;
180 goto restart;
181 }
182
183 if (!dont_cache)
184 route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
185failure:
186 return -1;
187
188old_method:
189 if (id && (TC_H_MAJ(id) == 0 ||
190 !(TC_H_MAJ(id^tp->q->handle)))) {
191 res->classid = id;
192 res->class = 0;
193 return 0;
194 }
195 return -1;
196}
197
198static inline u32 to_hash(u32 id)
199{
200 u32 h = id&0xFF;
201 if (id&0x8000)
202 h += 256;
203 return h;
204}
205
206static inline u32 from_hash(u32 id)
207{
208 id &= 0xFFFF;
209 if (id == 0xFFFF)
210 return 32;
211 if (!(id & 0x8000)) {
212 if (id > 255)
213 return 256;
214 return id&0xF;
215 }
216 return 16 + (id&0xF);
217}
218
219static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
220{
221 struct route4_head *head = (struct route4_head*)tp->root;
222 struct route4_bucket *b;
223 struct route4_filter *f;
224 unsigned h1, h2;
225
226 if (!head)
227 return 0;
228
229 h1 = to_hash(handle);
230 if (h1 > 256)
231 return 0;
232
233 h2 = from_hash(handle>>16);
234 if (h2 > 32)
235 return 0;
236
237 if ((b = head->table[h1]) != NULL) {
238 for (f = b->ht[h2]; f; f = f->next)
239 if (f->handle == handle)
240 return (unsigned long)f;
241 }
242 return 0;
243}
244
245static void route4_put(struct tcf_proto *tp, unsigned long f)
246{
247}
248
249static int route4_init(struct tcf_proto *tp)
250{
251 return 0;
252}
253
254static inline void
255route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
256{
257 tcf_unbind_filter(tp, &f->res);
258 tcf_exts_destroy(tp, &f->exts);
259 kfree(f);
260}
261
262static void route4_destroy(struct tcf_proto *tp)
263{
Patrick McHardy47a1a1d2008-11-19 08:03:09 +0000264 struct route4_head *head = tp->root;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700265 int h1, h2;
266
267 if (head == NULL)
268 return;
269
270 for (h1=0; h1<=256; h1++) {
271 struct route4_bucket *b;
272
273 if ((b = head->table[h1]) != NULL) {
274 for (h2=0; h2<=32; h2++) {
275 struct route4_filter *f;
276
277 while ((f = b->ht[h2]) != NULL) {
278 b->ht[h2] = f->next;
279 route4_delete_filter(tp, f);
280 }
281 }
282 kfree(b);
283 }
284 }
285 kfree(head);
286}
287
288static int route4_delete(struct tcf_proto *tp, unsigned long arg)
289{
290 struct route4_head *head = (struct route4_head*)tp->root;
291 struct route4_filter **fp, *f = (struct route4_filter*)arg;
292 unsigned h = 0;
293 struct route4_bucket *b;
294 int i;
295
296 if (!head || !f)
297 return -EINVAL;
298
299 h = f->handle;
300 b = f->bkt;
301
302 for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
303 if (*fp == f) {
304 tcf_tree_lock(tp);
305 *fp = f->next;
306 tcf_tree_unlock(tp);
307
David S. Miller15b458f2008-07-16 02:42:51 -0700308 route4_reset_fastmap(tp->q, head, f->id);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309 route4_delete_filter(tp, f);
310
311 /* Strip tree */
312
313 for (i=0; i<=32; i++)
314 if (b->ht[i])
315 return 0;
316
317 /* OK, session has no flows */
318 tcf_tree_lock(tp);
319 head->table[to_hash(h)] = NULL;
320 tcf_tree_unlock(tp);
321
322 kfree(b);
323 return 0;
324 }
325 }
326 return 0;
327}
328
Patrick McHardy6fa8c012008-01-23 20:36:12 -0800329static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
330 [TCA_ROUTE4_CLASSID] = { .type = NLA_U32 },
331 [TCA_ROUTE4_TO] = { .type = NLA_U32 },
332 [TCA_ROUTE4_FROM] = { .type = NLA_U32 },
333 [TCA_ROUTE4_IIF] = { .type = NLA_U32 },
334};
335
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
337 struct route4_filter *f, u32 handle, struct route4_head *head,
Patrick McHardyadd93b62008-01-22 22:11:33 -0800338 struct nlattr **tb, struct nlattr *est, int new)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339{
340 int err;
341 u32 id = 0, to = 0, nhandle = 0x8000;
342 struct route4_filter *fp;
343 unsigned int h1;
344 struct route4_bucket *b;
345 struct tcf_exts e;
346
347 err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
348 if (err < 0)
349 return err;
350
351 err = -EINVAL;
Patrick McHardyadd93b62008-01-22 22:11:33 -0800352 if (tb[TCA_ROUTE4_TO]) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353 if (new && handle & 0x8000)
354 goto errout;
Patrick McHardy1587bac2008-01-23 20:35:03 -0800355 to = nla_get_u32(tb[TCA_ROUTE4_TO]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356 if (to > 0xFF)
357 goto errout;
358 nhandle = to;
359 }
360
Patrick McHardyadd93b62008-01-22 22:11:33 -0800361 if (tb[TCA_ROUTE4_FROM]) {
362 if (tb[TCA_ROUTE4_IIF])
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363 goto errout;
Patrick McHardy1587bac2008-01-23 20:35:03 -0800364 id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365 if (id > 0xFF)
366 goto errout;
367 nhandle |= id << 16;
Patrick McHardyadd93b62008-01-22 22:11:33 -0800368 } else if (tb[TCA_ROUTE4_IIF]) {
Patrick McHardy1587bac2008-01-23 20:35:03 -0800369 id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700370 if (id > 0x7FFF)
371 goto errout;
372 nhandle |= (id | 0x8000) << 16;
373 } else
374 nhandle |= 0xFFFF << 16;
375
376 if (handle && new) {
377 nhandle |= handle & 0x7F00;
378 if (nhandle != handle)
379 goto errout;
380 }
381
382 h1 = to_hash(nhandle);
383 if ((b = head->table[h1]) == NULL) {
384 err = -ENOBUFS;
Panagiotis Issaris0da974f2006-07-21 14:51:30 -0700385 b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386 if (b == NULL)
387 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388
389 tcf_tree_lock(tp);
390 head->table[h1] = b;
391 tcf_tree_unlock(tp);
392 } else {
393 unsigned int h2 = from_hash(nhandle >> 16);
394 err = -EEXIST;
395 for (fp = b->ht[h2]; fp; fp = fp->next)
396 if (fp->handle == f->handle)
397 goto errout;
398 }
399
400 tcf_tree_lock(tp);
Patrick McHardyadd93b62008-01-22 22:11:33 -0800401 if (tb[TCA_ROUTE4_TO])
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402 f->id = to;
403
Patrick McHardyadd93b62008-01-22 22:11:33 -0800404 if (tb[TCA_ROUTE4_FROM])
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405 f->id = to | id<<16;
Patrick McHardyadd93b62008-01-22 22:11:33 -0800406 else if (tb[TCA_ROUTE4_IIF])
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 f->iif = id;
408
409 f->handle = nhandle;
410 f->bkt = b;
411 tcf_tree_unlock(tp);
412
Patrick McHardyadd93b62008-01-22 22:11:33 -0800413 if (tb[TCA_ROUTE4_CLASSID]) {
Patrick McHardy1587bac2008-01-23 20:35:03 -0800414 f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415 tcf_bind_filter(tp, &f->res, base);
416 }
417
418 tcf_exts_change(tp, &f->exts, &e);
419
420 return 0;
421errout:
422 tcf_exts_destroy(tp, &e);
423 return err;
424}
425
426static int route4_change(struct tcf_proto *tp, unsigned long base,
427 u32 handle,
Patrick McHardyadd93b62008-01-22 22:11:33 -0800428 struct nlattr **tca,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429 unsigned long *arg)
430{
431 struct route4_head *head = tp->root;
432 struct route4_filter *f, *f1, **fp;
433 struct route4_bucket *b;
Patrick McHardyadd93b62008-01-22 22:11:33 -0800434 struct nlattr *opt = tca[TCA_OPTIONS];
435 struct nlattr *tb[TCA_ROUTE4_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700436 unsigned int h, th;
437 u32 old_handle = 0;
438 int err;
439
440 if (opt == NULL)
441 return handle ? -EINVAL : 0;
442
Patrick McHardy6fa8c012008-01-23 20:36:12 -0800443 err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
Patrick McHardycee63722008-01-23 20:33:32 -0800444 if (err < 0)
445 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700446
447 if ((f = (struct route4_filter*)*arg) != NULL) {
448 if (f->handle != handle && handle)
449 return -EINVAL;
450
451 if (f->bkt)
452 old_handle = f->handle;
453
454 err = route4_set_parms(tp, base, f, handle, head, tb,
Patrick McHardyadd93b62008-01-22 22:11:33 -0800455 tca[TCA_RATE], 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700456 if (err < 0)
457 return err;
458
459 goto reinsert;
460 }
461
462 err = -ENOBUFS;
463 if (head == NULL) {
Panagiotis Issaris0da974f2006-07-21 14:51:30 -0700464 head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465 if (head == NULL)
466 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700467
468 tcf_tree_lock(tp);
469 tp->root = head;
470 tcf_tree_unlock(tp);
471 }
472
Panagiotis Issaris0da974f2006-07-21 14:51:30 -0700473 f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700474 if (f == NULL)
475 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476
477 err = route4_set_parms(tp, base, f, handle, head, tb,
Patrick McHardyadd93b62008-01-22 22:11:33 -0800478 tca[TCA_RATE], 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479 if (err < 0)
480 goto errout;
481
482reinsert:
483 h = from_hash(f->handle >> 16);
484 for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
485 if (f->handle < f1->handle)
486 break;
487
488 f->next = f1;
489 tcf_tree_lock(tp);
490 *fp = f;
491
492 if (old_handle && f->handle != old_handle) {
493 th = to_hash(old_handle);
494 h = from_hash(old_handle >> 16);
495 if ((b = head->table[th]) != NULL) {
496 for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
497 if (*fp == f) {
498 *fp = f->next;
499 break;
500 }
501 }
502 }
503 }
504 tcf_tree_unlock(tp);
505
David S. Miller15b458f2008-07-16 02:42:51 -0700506 route4_reset_fastmap(tp->q, head, f->id);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700507 *arg = (unsigned long)f;
508 return 0;
509
510errout:
Jesper Juhla51482b2005-11-08 09:41:34 -0800511 kfree(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512 return err;
513}
514
515static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
516{
517 struct route4_head *head = tp->root;
518 unsigned h, h1;
519
520 if (head == NULL)
521 arg->stop = 1;
522
523 if (arg->stop)
524 return;
525
526 for (h = 0; h <= 256; h++) {
527 struct route4_bucket *b = head->table[h];
528
529 if (b) {
530 for (h1 = 0; h1 <= 32; h1++) {
531 struct route4_filter *f;
532
533 for (f = b->ht[h1]; f; f = f->next) {
534 if (arg->count < arg->skip) {
535 arg->count++;
536 continue;
537 }
538 if (arg->fn(tp, (unsigned long)f, arg) < 0) {
539 arg->stop = 1;
540 return;
541 }
542 arg->count++;
543 }
544 }
545 }
546 }
547}
548
549static int route4_dump(struct tcf_proto *tp, unsigned long fh,
550 struct sk_buff *skb, struct tcmsg *t)
551{
552 struct route4_filter *f = (struct route4_filter*)fh;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -0700553 unsigned char *b = skb_tail_pointer(skb);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -0800554 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555 u32 id;
556
557 if (f == NULL)
558 return skb->len;
559
560 t->tcm_handle = f->handle;
561
Patrick McHardy4b3550ef2008-01-23 20:34:11 -0800562 nest = nla_nest_start(skb, TCA_OPTIONS);
563 if (nest == NULL)
564 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565
566 if (!(f->handle&0x8000)) {
567 id = f->id&0xFF;
Patrick McHardy24beeab2008-01-23 20:34:48 -0800568 NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700569 }
570 if (f->handle&0x80000000) {
571 if ((f->handle>>16) != 0xFFFF)
Patrick McHardy24beeab2008-01-23 20:34:48 -0800572 NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573 } else {
574 id = f->id>>16;
Patrick McHardy24beeab2008-01-23 20:34:48 -0800575 NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576 }
577 if (f->res.classid)
Patrick McHardy24beeab2008-01-23 20:34:48 -0800578 NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579
580 if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
Patrick McHardyadd93b62008-01-22 22:11:33 -0800581 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700582
Patrick McHardy4b3550ef2008-01-23 20:34:11 -0800583 nla_nest_end(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584
585 if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
Patrick McHardyadd93b62008-01-22 22:11:33 -0800586 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587
588 return skb->len;
589
Patrick McHardyadd93b62008-01-22 22:11:33 -0800590nla_put_failure:
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -0700591 nlmsg_trim(skb, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 return -1;
593}
594
Patrick McHardy2eb9d752008-01-22 22:10:42 -0800595static struct tcf_proto_ops cls_route4_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596 .kind = "route",
597 .classify = route4_classify,
598 .init = route4_init,
599 .destroy = route4_destroy,
600 .get = route4_get,
601 .put = route4_put,
602 .change = route4_change,
603 .delete = route4_delete,
604 .walk = route4_walk,
605 .dump = route4_dump,
606 .owner = THIS_MODULE,
607};
608
609static int __init init_route4(void)
610{
611 return register_tcf_proto_ops(&cls_route4_ops);
612}
613
614static void __exit exit_route4(void)
615{
616 unregister_tcf_proto_ops(&cls_route4_ops);
617}
618
619module_init(init_route4)
620module_exit(exit_route4)
621MODULE_LICENSE("GPL");