/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Version:	$Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/ip_mp_alg.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define RT_FL_TOS(oldflp) \
	((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_min_delay		= 2 * HZ;
static int ip_rt_max_delay		= 10 * HZ;
static int ip_rt_max_size;
static int ip_rt_gc_timeout		= RT_GC_TIMEOUT;
static int ip_rt_gc_interval		= 60 * HZ;
static int ip_rt_gc_min_interval	= HZ / 2;
static int ip_rt_redirect_number	= 9;
static int ip_rt_redirect_load		= HZ / 50;
static int ip_rt_redirect_silence	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost		= HZ;
static int ip_rt_error_burst		= 5 * HZ;
static int ip_rt_gc_elasticity		= 8;
static int ip_rt_mtu_expires		= 10 * 60 * HZ;
static int ip_rt_min_pmtu		= 512 + 20 + 20;
static int ip_rt_min_advmss		= 256;
static int ip_rt_secret_interval	= 10 * 60 * HZ;
static unsigned long rt_deadline;

#define RTprint(a...)	printk(KERN_DEBUG a)

static struct timer_list rt_flush_timer;
static struct timer_list rt_periodic_timer;
static struct timer_list rt_secret_timer;

/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static void		 ipv4_dst_ifdown(struct dst_entry *dst,
					 struct net_device *dev, int how);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(void);


static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		__constant_htons(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.entry_size =		sizeof(struct rtable),
};

#define ECN_OR_COST(class)	TC_PRIO_##class

__u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};

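/*
 * Example (illustrative, not part of the original file): the table is
 * indexed with the four TOS bits shifted right by one (see
 * rt_tos2priority() in <net/route.h>), so a TOS of 0x10 (IPTOS_LOWDELAY)
 * maps to ip_tos2prio[8], which is TC_PRIO_INTERACTIVE.
 */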

/*
 * Route cache.
 */

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */

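/*
 * Illustrative sketch of the scheme above (not part of the original
 * code).  Readers walk a chain lock-free under RCU and take references
 * with dst_hold(); writers serialize on the per-bucket lock before
 * unlinking:
 *
 *	rcu_read_lock_bh();
 *	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
 *	     rth = rcu_dereference(rth->u.dst.rt_next))
 *		;	// compare keys, dst_hold() on a match
 *	rcu_read_unlock_bh();
 *
 *	spin_lock_bh(rt_hash_lock_addr(hash));	// writer side
 *	*rthp = rth->u.dst.rt_next;		// unlink, then rt_free(rth)
 *	spin_unlock_bh(rt_hash_lock_addr(hash));
 */
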
struct rt_hash_bucket {
	struct rtable	*chain;
};
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table
 * of spinlocks.  The size of this table is a power of two and depends on
 * the number of CPUS.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
# define rt_hash_lock_init()	{ \
		int i; \
		rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ, GFP_KERNEL); \
		if (!rt_hash_locks) panic("IP: failed to allocate rt_hash_locks\n"); \
		for (i = 0; i < RT_HASH_LOCK_SZ; i++) \
			spin_lock_init(&rt_hash_locks[i]); \
		}
#else
# define rt_hash_lock_addr(slot) NULL
# define rt_hash_lock_init()
#endif

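/*
 * Worked example (not from the original source): with the masking above
 * and RT_HASH_LOCK_SZ == 256, hash buckets 5, 261, 517, ... all share
 * rt_hash_locks[5], so lock memory stays bounded no matter how large
 * rt_hash_mask grows, while contention stays low on big tables.
 */
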
static struct rt_hash_bucket 	*rt_hash_table;
static unsigned			rt_hash_mask;
static int			rt_hash_log;
static unsigned int		rt_hash_rnd;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) \
	(__raw_get_cpu_var(rt_cache_stat).field++)

static int rt_intern_hash(unsigned hash, struct rtable *rth,
				struct rtable **res);

static unsigned int rt_hash_code(u32 daddr, u32 saddr)
{
	return (jhash_2words(daddr, saddr, rt_hash_rnd)
		& rt_hash_mask);
}

#define rt_hash(daddr, saddr, idx) \
	rt_hash_code((__force u32)(__be32)(daddr),\
		(__force u32)(__be32)(saddr) ^ ((idx) << 5))

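/*
 * Usage sketch (illustrative only): a cache lookup hashes the flow key
 * and walks a single chain, e.g.
 *
 *	unsigned hash = rt_hash(daddr, saddr, dev->ifindex);
 *	rth = rcu_dereference(rt_hash_table[hash].chain);
 *
 * Folding the interface index into saddr ((idx) << 5) keeps the same
 * address pair on different devices from always sharing one bucket.
 */
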
#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	int bucket;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rtable *r = NULL;
	struct rt_cache_iter_state *st = seq->private;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		rcu_read_lock_bh();
		r = rt_hash_table[st->bucket].chain;
		if (r)
			break;
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
{
	struct rt_cache_iter_state *st = rcu_dereference(seq->private);

	r = r->u.dst.rt_next;
	while (!r) {
		rcu_read_unlock_bh();
		if (--st->bucket < 0)
			break;
		rcu_read_lock_bh();
		r = rt_hash_table[st->bucket].chain;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? rt_cache_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r = NULL;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		char temp[256];

		sprintf(temp, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
			      "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X",
			r->u.dst.dev ? r->u.dst.dev->name : "*",
			(unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
			r->rt_flags, atomic_read(&r->u.dst.__refcnt),
			r->u.dst.__use, 0, (unsigned long)r->rt_src,
			(dst_metric(&r->u.dst, RTAX_ADVMSS) ?
			     (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
			dst_metric(&r->u.dst, RTAX_WINDOW),
			(int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
			      dst_metric(&r->u.dst, RTAX_RTTVAR)),
			r->fl.fl4_tos,
			r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
			r->u.dst.hh ? (r->u.dst.hh->hh_output ==
				       dev_queue_xmit) : 0,
			r->rt_spec_dst);
		seq_printf(seq, "%-127s\n", temp);
	}
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	rc = seq_open(file, &rt_cache_seq_ops);
	if (rc)
		goto out_kfree;
	seq          = file->private_data;
	seq->private = s;
	memset(s, 0, sizeof(*s));
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   atomic_read(&ipv4_dst_ops.entries),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,

		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */

static __inline__ void rt_free(struct rtable *rt)
{
	multipath_remove(rt);
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static __inline__ void rt_drop(struct rtable *rt)
{
	multipath_remove(rt);
	ip_rt_put(rt);
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static __inline__ int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rth->fl.iif && rth->u.dst.rt_next;
}

static __inline__ int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		rth->u.dst.expires;
}

static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->u.dst.__refcnt))
		goto out;

	ret = 1;
	if (rth->u.dst.expires &&
	    time_after_eq(jiffies, rth->u.dst.expires))
		goto out;

	age = jiffies - rth->u.dst.lastuse;
	ret = 0;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}

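/*
 * Example of the policy above (defaults assumed, illustrative only):
 * with tmo1 == tmo2 == ip_rt_gc_timeout (300 s), an unreferenced entry
 * last used 200 s ago survives a scan, but callers halve tmo as they
 * walk a chain, so entries deep in a long chain, and broadcast or
 * multicast entries matched by rt_fast_clean(), expire much sooner.
 */
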
/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->u.dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (!rt->fl.iif ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}

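/*
 * Worked example (illustrative): a just-used output route (no iif) with
 * no redirect/expiry state scores near the top of the range with bit 30
 * set, while an idle input broadcast entry scores far lower; since
 * rt_intern_hash() evicts the lowest-scoring unreferenced entry, the
 * broadcast entry is the one reclaimed first.
 */
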
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
		(fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
		(fl1->mark ^ fl2->mark) |
		(*(u16 *)&fl1->nl_u.ip4_u.tos ^
		 *(u16 *)&fl2->nl_u.ip4_u.tos) |
		(fl1->oif ^ fl2->oif) |
		(fl1->iif ^ fl2->iif)) == 0;
}

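/*
 * Note on the technique above: XOR-ing the fields pairwise and OR-ing
 * the results compares the whole flow key without branches; the keys
 * are equal iff the accumulated value is zero.  The (u16 *) access
 * deliberately covers fl4_tos and the adjacent fl4_scope byte in one
 * load.
 */
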
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
						struct rtable *expentry,
						int *removed_count)
{
	int passedexpired = 0;
	struct rtable **nextstep = NULL;
	struct rtable **rthp = chain_head;
	struct rtable *rth;

	if (removed_count)
		*removed_count = 0;

	while ((rth = *rthp) != NULL) {
		if (rth == expentry)
			passedexpired = 1;

		if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 &&
		    compare_keys(&(*rthp)->fl, &expentry->fl)) {
			if (*rthp == expentry) {
				*rthp = rth->u.dst.rt_next;
				continue;
			} else {
				*rthp = rth->u.dst.rt_next;
				rt_free(rth);
				if (removed_count)
					++(*removed_count);
			}
		} else {
			if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
			    passedexpired && !nextstep)
				nextstep = &rth->u.dst.rt_next;

			rthp = &rth->u.dst.rt_next;
		}
	}

	rt_free(expentry);
	if (removed_count)
		++(*removed_count);

	return nextstep;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */


/* This runs via a timer and thus is always in BH context. */
static void rt_check_expire(unsigned long dummy)
{
	static unsigned int rover;
	unsigned int i = rover, goal;
	struct rtable *rth, **rthp;
	unsigned long now = jiffies;
	u64 mult;

	mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
	if (ip_rt_gc_timeout > 1)
		do_div(mult, ip_rt_gc_timeout);
	goal = (unsigned int)mult;
	if (goal > rt_hash_mask) goal = rt_hash_mask + 1;
	for (; goal > 0; goal--) {
		unsigned long tmo = ip_rt_gc_timeout;

		i = (i + 1) & rt_hash_mask;
		rthp = &rt_hash_table[i].chain;

		if (*rthp == 0)
			continue;
		spin_lock(rt_hash_lock_addr(i));
		while ((rth = *rthp) != NULL) {
			if (rth->u.dst.expires) {
				/* Entry is expired even if it is in use */
				if (time_before_eq(now, rth->u.dst.expires)) {
					tmo >>= 1;
					rthp = &rth->u.dst.rt_next;
					continue;
				}
			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
				tmo >>= 1;
				rthp = &rth->u.dst.rt_next;
				continue;
			}

			/* Cleanup aged off entries. */
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
			/* remove all related balanced entries if necessary */
			if (rth->u.dst.flags & DST_BALANCED) {
				rthp = rt_remove_balanced_route(
					&rt_hash_table[i].chain,
					rth, NULL);
				if (!rthp)
					break;
			} else {
				*rthp = rth->u.dst.rt_next;
				rt_free(rth);
			}
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
			*rthp = rth->u.dst.rt_next;
			rt_free(rth);
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
		}
		spin_unlock(rt_hash_lock_addr(i));

		/* Fallback loop breaker. */
		if (time_after(jiffies, now))
			break;
	}
	rover = i;
	mod_timer(&rt_periodic_timer, jiffies + ip_rt_gc_interval);
}

/* This can run from both BH and non-BH contexts, the latter
 * in the case of a forced flush event.
 */
static void rt_run_flush(unsigned long dummy)
{
	int i;
	struct rtable *rth, *next;

	rt_deadline = 0;

	get_random_bytes(&rt_hash_rnd, 4);

	for (i = rt_hash_mask; i >= 0; i--) {
		spin_lock_bh(rt_hash_lock_addr(i));
		rth = rt_hash_table[i].chain;
		if (rth)
			rt_hash_table[i].chain = NULL;
		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; rth; rth = next) {
			next = rth->u.dst.rt_next;
			rt_free(rth);
		}
	}
}

static DEFINE_SPINLOCK(rt_flush_lock);

void rt_cache_flush(int delay)
{
	unsigned long now = jiffies;
	int user_mode = !in_softirq();

	if (delay < 0)
		delay = ip_rt_min_delay;

	/* flush existing multipath state */
	multipath_flush();

	spin_lock_bh(&rt_flush_lock);

	if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) {
		long tmo = (long)(rt_deadline - now);

		/* If the flush timer is already running
		   and the flush request is not immediate (delay > 0):

		   if the deadline is not reached, extend the timer to "delay",
		   otherwise fire it at deadline time.
		 */

		if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
			tmo = 0;

		if (delay > tmo)
			delay = tmo;
	}

	if (delay <= 0) {
		spin_unlock_bh(&rt_flush_lock);
		rt_run_flush(0);
		return;
	}

	if (rt_deadline == 0)
		rt_deadline = now + ip_rt_max_delay;

	mod_timer(&rt_flush_timer, now+delay);
	spin_unlock_bh(&rt_flush_lock);
}

static void rt_secret_rebuild(unsigned long dummy)
{
	unsigned long now = jiffies;

	rt_cache_flush(0);
	mod_timer(&rt_secret_timer, now + ip_rt_secret_interval);
}

/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to the number of newly generated ones.

   The current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that when the network
   is idle expire is large enough to keep plenty of warm entries,
   and when load increases it is reduced to limit the cache size.
 */

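/*
 * Illustrative arithmetic with the defaults (not from the original
 * source): with ip_rt_gc_elasticity == 8 and a 2^16 bucket hash,
 * "goal" only goes positive once the cache exceeds 8 << 16 == 524288
 * entries; "equilibrium" then tracks the size at which roughly as many
 * entries age off per GC run as are newly created.
 */
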
static int rt_garbage_collect(void)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth, **rthp;
	unsigned long now = jiffies;
	int goal;

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	/* Calculate number of entries, which we want to expire now. */
	goal = atomic_read(&ipv4_dst_ops.entries) -
		(ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal / 2, rt_hash_mask + 1);
			goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		}
	} else {
		/* We are in dangerous area. Try to reduce cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal / 2, rt_hash_mask + 1);
		equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = *rthp) != NULL) {
				if (!rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->u.dst.rt_next;
					continue;
				}
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
				/* remove all related balanced entries
				 * if necessary
				 */
				if (rth->u.dst.flags & DST_BALANCED) {
					int r;

					rthp = rt_remove_balanced_route(
						&rt_hash_table[k].chain,
						rth,
						&r);
					goal -= r;
					if (!rthp)
						break;
				} else {
					*rthp = rth->u.dst.rt_next;
					rt_free(rth);
					goal--;
				}
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
				*rthp = rth->u.dst.rt_next;
				rt_free(rth);
				goal--;
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* Goal is not achieved. We stop the process if:

		   - expire is reduced to zero. Otherwise, expire is halved.
		   - the table is not full.
		   - we are called from interrupt.
		   - the jiffies check is just a fallback/debug loop breaker.
		     We will not spin here for a long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;
#if RT_CACHE_DEBUG >= 2
		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
				atomic_read(&ipv4_dst_ops.entries), goal, i);
#endif

		if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
			atomic_read(&ipv4_dst_ops.entries), goal, rover);
#endif
out:	return 0;
}

static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
{
	struct rtable	*rth, **rthp;
	unsigned long	now;
	struct rtable *cand, **candp;
	u32		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = *rthp) != NULL) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
		if (!(rth->u.dst.flags & DST_BALANCED) &&
		    compare_keys(&rth->fl, &rt->fl)) {
#else
		if (compare_keys(&rth->fl, &rt->fl)) {
#endif
			/* Put it first */
			*rthp = rth->u.dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->u.dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			rth->u.dst.__use++;
			dst_hold(&rth->u.dst);
			rth->u.dst.lastuse = now;
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			*rp = rth;
			return 0;
		}

		if (!atomic_read(&rth->u.dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->u.dst.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be average length of chain
		 * length, when exceeded gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->u.dst.rt_next;
			rt_free(cand);
		}
	}

	/* Try to bind route to arp only if it is output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
		int err = arp_bind_neighbour(&rt->u.dst);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return err;
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect();
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "Neighbour table overflow.\n");
			rt_drop(rt);
			return -ENOBUFS;
		}
	}

	rt->u.dst.rt_next = rt_hash_table[hash].chain;
#if RT_CACHE_DEBUG >= 2
	if (rt->u.dst.rt_next) {
		struct rtable *trt;
		printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
		       NIPQUAD(rt->rt_dst));
		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
			printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
		printk("\n");
	}
#endif
	rt_hash_table[hash].chain = rt;
	spin_unlock_bh(rt_hash_lock_addr(hash));
	*rp = rt;
	return 0;
}

void rt_bind_peer(struct rtable *rt, int create)
{
	static DEFINE_SPINLOCK(rt_peer_lock);
	struct inet_peer *peer;

	peer = inet_getpeer(rt->rt_dst, create);

	spin_lock_bh(&rt_peer_lock);
	if (rt->peer == NULL) {
		rt->peer = peer;
		peer = NULL;
	}
	spin_unlock_bh(&rt_peer_lock);
	if (peer)
		inet_putpeer(peer);
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chance to
 * select an ID that is unique over a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);

		/* If a peer is attached to the destination, it is never
		   detached, so we need not grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}

static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable **rthp;

	spin_lock_bh(rt_hash_lock_addr(hash));
	ip_rt_put(rt);
	for (rthp = &rt_hash_table[hash].chain; *rthp;
	     rthp = &(*rthp)->u.dst.rt_next)
		if (*rthp == rt) {
			*rthp = rt->u.dst.rt_next;
			rt_free(rt);
			break;
		}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}

void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
		    __be32 saddr, struct net_device *dev)
{
	int i, k;
	struct in_device *in_dev = in_dev_get(dev);
	struct rtable *rth, **rthp;
	__be32  skeys[2] = { saddr, 0 };
	int  ikeys[2] = { dev->ifindex, 0 };
	struct netevent_redirect netevent;

	if (!in_dev)
		return;

	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
	    || MULTICAST(new_gw) || BADCLASS(new_gw) || ZERONET(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	for (i = 0; i < 2; i++) {
		for (k = 0; k < 2; k++) {
			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]);

			rthp=&rt_hash_table[hash].chain;

			rcu_read_lock();
			while ((rth = rcu_dereference(*rthp)) != NULL) {
				struct rtable *rt;

				if (rth->fl.fl4_dst != daddr ||
				    rth->fl.fl4_src != skeys[i] ||
				    rth->fl.oif != ikeys[k] ||
				    rth->fl.iif != 0) {
					rthp = &rth->u.dst.rt_next;
					continue;
				}

				if (rth->rt_dst != daddr ||
				    rth->rt_src != saddr ||
				    rth->u.dst.error ||
				    rth->rt_gateway != old_gw ||
				    rth->u.dst.dev != dev)
					break;

				dst_hold(&rth->u.dst);
				rcu_read_unlock();

				rt = dst_alloc(&ipv4_dst_ops);
				if (rt == NULL) {
					ip_rt_put(rth);
					in_dev_put(in_dev);
					return;
				}

				/* Copy all the information. */
				*rt = *rth;
				INIT_RCU_HEAD(&rt->u.dst.rcu_head);
				rt->u.dst.__use		= 1;
				atomic_set(&rt->u.dst.__refcnt, 1);
				rt->u.dst.child		= NULL;
				if (rt->u.dst.dev)
					dev_hold(rt->u.dst.dev);
				if (rt->idev)
					in_dev_hold(rt->idev);
				rt->u.dst.obsolete	= 0;
				rt->u.dst.lastuse	= jiffies;
				rt->u.dst.path		= &rt->u.dst;
				rt->u.dst.neighbour	= NULL;
				rt->u.dst.hh		= NULL;
				rt->u.dst.xfrm		= NULL;

				rt->rt_flags		|= RTCF_REDIRECTED;

				/* Gateway is different ... */
				rt->rt_gateway		= new_gw;

				/* Redirect received -> path was valid */
				dst_confirm(&rth->u.dst);

				if (rt->peer)
					atomic_inc(&rt->peer->refcnt);

				if (arp_bind_neighbour(&rt->u.dst) ||
				    !(rt->u.dst.neighbour->nud_state &
					    NUD_VALID)) {
					if (rt->u.dst.neighbour)
						neigh_event_send(rt->u.dst.neighbour, NULL);
					ip_rt_put(rth);
					rt_drop(rt);
					goto do_next;
				}

				netevent.old = &rth->u.dst;
				netevent.new = &rt->u.dst;
				call_netevent_notifiers(NETEVENT_REDIRECT,
							&netevent);

				rt_del(hash, rth);
				if (!rt_intern_hash(hash, rt, &rt))
					ip_rt_put(rt);
				goto do_next;
			}
			rcu_read_unlock();
		do_next:
			;
		}
	}
	in_dev_put(in_dev);
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
			"%u.%u.%u.%u ignored.\n"
			"  Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n",
		       NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
		       NIPQUAD(saddr), NIPQUAD(daddr));
#endif
	in_dev_put(in_dev);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable*)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->u.dst.expires) {
			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
						rt->fl.oif);
#if RT_CACHE_DEBUG >= 1
			printk(KERN_DEBUG "ip_rt_advice: redirect to "
					  "%u.%u.%u.%u/%02x dropped\n",
				NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
#endif
			rt_del(hash, rt);
			ret = NULL;
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

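/*
 * Worked example of the backoff (illustrative, assuming HZ == 1000):
 * with ip_rt_redirect_load == HZ/50 == 20 jiffies, the gap required
 * before each further redirect is ip_rt_redirect_load << rate_tokens,
 * i.e. 40 ms, 80 ms, 160 ms, ...; after ip_rt_redirect_number == 9
 * redirects we stay silent until ip_rt_redirect_silence ==
 * (HZ/50) << 10, about 20 s, of quiet has passed.
 */
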
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = (struct rtable*)skb->dst;
	struct in_device *in_dev = in_dev_get(rt->u.dst.dev);

	if (!in_dev)
		return;

	if (!IN_DEV_TX_REDIRECTS(in_dev))
		goto out;

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
		rt->u.dst.rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set u.dst.rate_last to the last seen redirected packet.
	 */
	if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
		rt->u.dst.rate_last = jiffies;
		goto out;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (rt->u.dst.rate_tokens == 0 ||
	    time_after(jiffies,
		       (rt->u.dst.rate_last +
			(ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		rt->u.dst.rate_last = jiffies;
		++rt->u.dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (IN_DEV_LOG_MARTIANS(in_dev) &&
		    rt->u.dst.rate_tokens == ip_rt_redirect_number &&
		    net_ratelimit())
			printk(KERN_WARNING "host %u.%u.%u.%u/if%d ignores "
				"redirects for %u.%u.%u.%u to %u.%u.%u.%u.\n",
				NIPQUAD(rt->rt_src), rt->rt_iif,
				NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
#endif
	}
out:
	in_dev_put(in_dev);
}

static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = (struct rtable*)skb->dst;
	unsigned long now;
	int code;

	switch (rt->u.dst.error) {
		case EINVAL:
		default:
			goto out;
		case EHOSTUNREACH:
			code = ICMP_HOST_UNREACH;
			break;
		case ENETUNREACH:
			code = ICMP_NET_UNREACH;
			break;
		case EACCES:
			code = ICMP_PKT_FILTERED;
			break;
	}

	now = jiffies;
	rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
	if (rt->u.dst.rate_tokens > ip_rt_error_burst)
		rt->u.dst.rate_tokens = ip_rt_error_burst;
	rt->u.dst.rate_last = now;
	if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
		rt->u.dst.rate_tokens -= ip_rt_error_cost;
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
	}

out:	kfree_skb(skb);
	return 0;
}

/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}

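/*
 * Example: guess_mtu(1500) returns 1492 (the next plateau down), and
 * guess_mtu(1492) returns 576; 68 is the minimum MTU an IPv4 router
 * may assume (see RFC 1191).
 */
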
unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
{
	int i;
	unsigned short old_mtu = ntohs(iph->tot_len);
	struct rtable *rth;
	__be32  skeys[2] = { iph->saddr, 0, };
	__be32  daddr = iph->daddr;
	unsigned short est_mtu = 0;

	if (ipv4_config.no_pmtu_disc)
		return 0;

	for (i = 0; i < 2; i++) {
		unsigned hash = rt_hash(daddr, skeys[i], 0);

		rcu_read_lock();
		for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
		     rth = rcu_dereference(rth->u.dst.rt_next)) {
			if (rth->fl.fl4_dst == daddr &&
			    rth->fl.fl4_src == skeys[i] &&
			    rth->rt_dst == daddr &&
			    rth->rt_src == iph->saddr &&
			    rth->fl.iif == 0 &&
			    !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) {
				unsigned short mtu = new_mtu;

				if (new_mtu < 68 || new_mtu >= old_mtu) {

					/* BSD 4.2 compatibility hack :-( */
					if (mtu == 0 &&
					    old_mtu >= rth->u.dst.metrics[RTAX_MTU-1] &&
					    old_mtu >= 68 + (iph->ihl << 2))
						old_mtu -= iph->ihl << 2;

					mtu = guess_mtu(old_mtu);
				}
				if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
					if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
						dst_confirm(&rth->u.dst);
						if (mtu < ip_rt_min_pmtu) {
							mtu = ip_rt_min_pmtu;
							rth->u.dst.metrics[RTAX_LOCK-1] |=
								(1 << RTAX_MTU);
						}
						rth->u.dst.metrics[RTAX_MTU-1] = mtu;
						dst_set_expires(&rth->u.dst,
							ip_rt_mtu_expires);
					}
					est_mtu = mtu;
				}
			}
		}
		rcu_read_unlock();
	}
	return est_mtu ? : new_mtu;
}

static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= 68 &&
	    !(dst_metric_locked(dst, RTAX_MTU))) {
		if (mtu < ip_rt_min_pmtu) {
			mtu = ip_rt_min_pmtu;
			dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
		}
		dst->metrics[RTAX_MTU-1] = mtu;
		dst_set_expires(dst, ip_rt_mtu_expires);
		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
	}
}

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer = rt->peer;
	struct in_device *idev = rt->idev;

	if (peer) {
		rt->peer = NULL;
		inet_putpeer(peer);
	}

	if (idev) {
		rt->idev = NULL;
		in_dev_put(idev);
	}
}

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
	struct rtable *rt = (struct rtable *) dst;
	struct in_device *idev = rt->idev;
	if (dev != &loopback_dev && idev && idev->dev == dev) {
		struct in_device *loopback_idev = in_dev_get(&loopback_dev);
		if (loopback_idev) {
			rt->idev = loopback_idev;
			in_dev_put(idev);
		}
	}
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = (struct rtable *) skb->dst;
	if (rt)
		dst_set_expires(&rt->u.dst, 0);
}

static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
		NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
		skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	return 0;
}

/*
   We do not cache the source address of the outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so it is out of the fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
	__be32 src;
	struct fib_result res;

	if (rt->fl.iif == 0)
		src = rt->rt_src;
	else if (fib_lookup(&rt->fl, &res) == 0) {
		src = FIB_RES_PREFSRC(res);
		fib_res_put(&res);
	} else
		src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
					RT_SCOPE_UNIVERSE);
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_NET_CLS_ROUTE
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->u.dst.tclassid & 0xFFFF))
		rt->u.dst.tclassid |= tag & 0xFFFF;
	if (!(rt->u.dst.tclassid & 0xFFFF0000))
		rt->u.dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
	struct fib_info *fi = res->fi;

	if (fi) {
		if (FIB_RES_GW(*res) &&
		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = FIB_RES_GW(*res);
		memcpy(rt->u.dst.metrics, fi->fib_metrics,
		       sizeof(rt->u.dst.metrics));
		if (fi->fib_mtu == 0) {
			rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
			if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
			    rt->rt_gateway != rt->rt_dst &&
			    rt->u.dst.dev->mtu > 576)
				rt->u.dst.metrics[RTAX_MTU-1] = 576;
		}
#ifdef CONFIG_NET_CLS_ROUTE
		rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
	} else
		rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;

	if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
	if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
		rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
	if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
				       ip_rt_min_advmss);
	if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;

#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, fib_rules_tclass(res));
#endif
	set_class_tag(rt, itag);
#endif
	rt->rt_type = res->type;
}

static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
				u8 tos, struct net_device *dev, int our)
{
	unsigned hash;
	struct rtable *rth;
	__be32 spec_dst;
	struct in_device *in_dev = in_dev_get(dev);
	u32 itag = 0;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ZERONET(saddr)) {
		if (!LOCAL_MCAST(daddr))
			goto e_inval;
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	} else if (fib_validate_source(saddr, 0, tos, 0,
					dev, &spec_dst, &itag) < 0)
		goto e_inval;

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->u.dst.output= ip_rt_bug;

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (in_dev->cnf.no_policy)
		rth->u.dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->u.dst.tclassid = itag;
#endif
	rth->rt_iif	=
	rth->fl.iif	= dev->ifindex;
	rth->u.dst.dev	= &loopback_dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->fl.oif	= 0;
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst= spec_dst;
	rth->rt_type	= RTN_MULTICAST;
	rth->rt_flags	= RTCF_MULTICAST;
	if (our) {
		rth->u.dst.input= ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	if (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->u.dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	in_dev_put(in_dev);
	hash = rt_hash(daddr, saddr, dev->ifindex);
	return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);

e_nobufs:
	in_dev_put(in_dev);
	return -ENOBUFS;

e_inval:
	in_dev_put(in_dev);
	return -EINVAL;
}


static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		printk(KERN_WARNING "martian source %u.%u.%u.%u from "
			"%u.%u.%u.%u, on dev %s\n",
			NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			int i;
			const unsigned char *p = skb_mac_header(skb);
			printk(KERN_WARNING "ll header: ");
			for (i = 0; i < dev->hard_header_len; i++, p++) {
				printk("%02x", *p);
				if (i < (dev->hard_header_len - 1))
					printk(":");
			}
			printk("\n");
		}
	}
#endif
}
1715
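/*
 * Build the routing cache entry for one forwarded (input) route:
 * validate the source, decide whether an ICMP redirect should be sent
 * (RTCF_DOREDIRECT), and wire up ip_forward()/ip_output() as the
 * input/output handlers of the new dst entry.
 */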
static inline int __mkroute_input(struct sk_buff *skb,
				  struct fib_result *res,
				  struct in_device *in_dev,
				  __be32 daddr, __be32 saddr, u32 tos,
				  struct rtable **result)
{
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	unsigned flags = 0;
	__be32 spec_dst;
	u32 itag;

	/* get a working reference to the output device */
	out_dev = in_dev_get(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		if (net_ratelimit())
			printk(KERN_CRIT "Bug in ip_route_input_slow(). "
					 "Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, &spec_dst, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);
		err = -EINVAL;
		goto cleanup;
	}

	if (err)
		flags |= RTCF_DIRECTSRC;

	if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		flags |= RTCF_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 */
		if (out_dev == in_dev && !(flags & RTCF_DNAT)) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags = DST_HOST;
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	if (res->fi->fib_nhs > 1)
		rth->u.dst.flags |= DST_BALANCED;
#endif
	if (in_dev->cnf.no_policy)
		rth->u.dst.flags |= DST_NOPOLICY;
	if (out_dev->cnf.no_xfrm)
		rth->u.dst.flags |= DST_NOXFRM;
	rth->fl.fl4_dst = daddr;
	rth->rt_dst = daddr;
	rth->fl.fl4_tos = tos;
	rth->fl.mark = skb->mark;
	rth->fl.fl4_src = saddr;
	rth->rt_src = saddr;
	rth->rt_gateway = daddr;
	rth->rt_iif =
	rth->fl.iif = in_dev->dev->ifindex;
	rth->u.dst.dev = out_dev->dev;
	dev_hold(rth->u.dst.dev);
	rth->idev = in_dev_get(rth->u.dst.dev);
	rth->fl.oif = 0;
	rth->rt_spec_dst = spec_dst;

	rth->u.dst.input = ip_forward;
	rth->u.dst.output = ip_output;

	rt_set_nexthop(rth, res, itag);

	rth->rt_flags = flags;

	*result = rth;
	err = 0;
cleanup:
	/* release the working reference to the output device */
	in_dev_put(out_dev);
	return err;
}

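/*
 * Default single-path variant: pick a next hop (via
 * fib_select_multipath() when several exist), create one cache entry
 * with __mkroute_input() and hash it into the route cache, attaching
 * it to skb->dst.
 */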
static inline int ip_mkroute_input_def(struct sk_buff *skb,
				       struct fib_result *res,
				       const struct flowi *fl,
				       struct in_device *in_dev,
				       __be32 daddr, __be32 saddr, u32 tos)
{
	struct rtable *rth = NULL;
	int err;
	unsigned hash;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
		fib_select_multipath(fl, res);
#endif

	/* create a routing cache entry */
	err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
	if (err)
		return err;

	/* put it into the cache */
	hash = rt_hash(daddr, saddr, fl->iif);
	return rt_intern_hash(hash, rth, (struct rtable **)&skb->dst);
}

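/*
 * With CONFIG_IP_ROUTE_MULTIPATH_CACHED every alternative next hop is
 * pre-created and inserted into the cache so the multipath algorithm
 * can later choose among them; otherwise this collapses to the default
 * single-path variant above.
 */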
static inline int ip_mkroute_input(struct sk_buff *skb,
				   struct fib_result *res,
				   const struct flowi *fl,
				   struct in_device *in_dev,
				   __be32 daddr, __be32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	struct rtable *rth = NULL, *rtres;
	unsigned char hop, hopcount;
	int err = -EINVAL;
	unsigned int hash;

	if (res->fi)
		hopcount = res->fi->fib_nhs;
	else
		hopcount = 1;

	/* distinguish between multipath and singlepath */
	if (hopcount < 2)
		return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
					    saddr, tos);

	/* add all alternatives to the routing cache */
	for (hop = 0; hop < hopcount; hop++) {
		res->nh_sel = hop;

		/* put reference to previous result */
		if (hop)
			ip_rt_put(rtres);

		/* create a routing cache entry */
		err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos,
				      &rth);
		if (err)
			return err;

		/* put it into the cache */
		hash = rt_hash(daddr, saddr, fl->iif);
		err = rt_intern_hash(hash, rth, &rtres);
		if (err)
			return err;

		/* forward hop information to multipath impl. */
		multipath_set_nhinfo(rth,
				     FIB_RES_NETWORK(*res),
				     FIB_RES_NETMASK(*res),
				     res->prefixlen,
				     &FIB_RES_NH(*res));
	}
	skb->dst = &rtres->u.dst;
	return err;
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
	return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos);
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
}

/*
 * NOTE. We drop all packets that have a local source
 * address, because every properly looped-back packet
 * must already have the correct destination attached by the output routine.
 *
 * This approach solves two big problems:
 * 1. Non-simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with a 100% guarantee.
 */

static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev)
{
	struct fib_result res;
	struct in_device *in_dev = in_dev_get(dev);
	struct flowi fl = { .nl_u = { .ip4_u =
				      { .daddr = daddr,
					.saddr = saddr,
					.tos = tos,
					.scope = RT_SCOPE_UNIVERSE,
				      } },
			    .mark = skb->mark,
			    .iif = dev->ifindex };
	unsigned flags = 0;
	u32 itag = 0;
	struct rtable *rth;
	unsigned hash;
	__be32 spec_dst;
	int err = -EINVAL;
	int free_res = 0;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which cannot be detected
	   by fib_lookup.
	 */

	if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr))
		goto martian_source;

	if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I do not even know whether to fix this or not. Waiting for complaints :-)
	 */
	if (ZERONET(saddr))
		goto martian_source;

	if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
		goto martian_destination;

	/*
	 * Now we are ready to route the packet.
	 */
	if ((err = fib_lookup(&fl, &res)) != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			goto e_hostunreach;
		goto no_route;
	}
	free_res = 1;

	RT_CACHE_STAT_INC(in_slow_tot);

	if (res.type == RTN_BROADCAST)
		goto brd_input;

	if (res.type == RTN_LOCAL) {
		int result;
		result = fib_validate_source(saddr, daddr, tos,
					     loopback_dev.ifindex,
					     dev, &spec_dst, &itag);
		if (result < 0)
			goto martian_source;
		if (result)
			flags |= RTCF_DIRECTSRC;
		spec_dst = daddr;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev))
		goto e_hostunreach;
	if (res.type != RTN_UNICAST)
		goto martian_destination;

	err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
	if (err == -ENOBUFS)
		goto e_nobufs;
	if (err == -EINVAL)
		goto e_inval;

done:
	in_dev_put(in_dev);
	if (free_res)
		fib_res_put(&res);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ZERONET(saddr))
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	else {
		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
					  &itag);
		if (err < 0)
			goto martian_source;
		if (err)
			flags |= RTCF_DIRECTSRC;
	}
	flags |= RTCF_BROADCAST;
	res.type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->u.dst.output = ip_rt_bug;

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags = DST_HOST;
	if (in_dev->cnf.no_policy)
		rth->u.dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst = daddr;
	rth->rt_dst = daddr;
	rth->fl.fl4_tos = tos;
	rth->fl.mark = skb->mark;
	rth->fl.fl4_src = saddr;
	rth->rt_src = saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->u.dst.tclassid = itag;
#endif
	rth->rt_iif =
	rth->fl.iif = dev->ifindex;
	rth->u.dst.dev = &loopback_dev;
	dev_hold(rth->u.dst.dev);
	rth->idev = in_dev_get(rth->u.dst.dev);
	rth->rt_gateway = daddr;
	rth->rt_spec_dst = spec_dst;
	rth->u.dst.input = ip_local_deliver;
	rth->rt_flags = flags | RTCF_LOCAL;
	if (res.type == RTN_UNREACHABLE) {
		rth->u.dst.input = ip_error;
		rth->u.dst.error = -err;
		rth->rt_flags &= ~RTCF_LOCAL;
	}
	rth->rt_type = res.type;
	hash = rt_hash(daddr, saddr, fl.iif);
	err = rt_intern_hash(hash, rth, (struct rtable **)&skb->dst);
	goto done;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
	res.type = RTN_UNREACHABLE;
	goto local_input;

	/*
	 * Do not cache martian addresses: they should be logged (RFC 1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_WARNING "martian destination %u.%u.%u.%u from "
			"%u.%u.%u.%u, dev %s\n",
			NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
#endif

e_hostunreach:
	err = -EHOSTUNREACH;
	goto done;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto e_inval;
}

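/*
 * Fast-path input routing: look the flow up in the hash table under
 * RCU; fall back to ip_route_input_mc()/ip_route_input_slow() on a
 * cache miss.
 *
 * A minimal caller sketch (roughly what ip_rcv_finish() does; the
 * surrounding error handling is omitted here):
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *	int err = ip_route_input(skb, iph->daddr, iph->saddr,
 *				 iph->tos, skb->dev);
 *	if (err)
 *		goto drop;		// no route, skb->dst stays unset
 *	return dst_input(skb);		// dispatches to dst.input()
 */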
int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		   u8 tos, struct net_device *dev)
{
	struct rtable *rth;
	unsigned hash;
	int iif = dev->ifindex;

	tos &= IPTOS_RT_MASK;
	hash = rt_hash(daddr, saddr, iif);

	rcu_read_lock();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (rth->fl.fl4_dst == daddr &&
		    rth->fl.fl4_src == saddr &&
		    rth->fl.iif == iif &&
		    rth->fl.oif == 0 &&
		    rth->fl.mark == skb->mark &&
		    rth->fl.fl4_tos == tos) {
			rth->u.dst.lastuse = jiffies;
			dst_hold(&rth->u.dst);
			rth->u.dst.__use++;
			RT_CACHE_STAT_INC(in_hit);
			rcu_read_unlock();
			skb->dst = (struct dst_entry *)rth;
			return 0;
		}
		RT_CACHE_STAT_INC(in_hlist_search);
	}
	rcu_read_unlock();

	/* Multicast recognition logic was moved from the route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As a result, a host on a multicast
	   network acquires a lot of useless route cache entries, a sort of
	   SDR message from all over the world. Now we try to get rid of them.
	   Really, provided the software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   compared with route cache reject entries.
	   Note that multicast routers are not affected, because a
	   route cache entry is created eventually.
	 */
	if (MULTICAST(daddr)) {
		struct in_device *in_dev;

		rcu_read_lock();
		if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
			int our = ip_check_mc(in_dev, daddr, saddr,
					      ip_hdr(skb)->protocol);
			if (our
#ifdef CONFIG_IP_MROUTE
			    || (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
#endif
			    ) {
				rcu_read_unlock();
				return ip_route_input_mc(skb, daddr, saddr,
							 tos, dev, our);
			}
		}
		rcu_read_unlock();
		return -EINVAL;
	}
	return ip_route_input_slow(skb, daddr, saddr, tos, dev);
}

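/*
 * Build one output routing cache entry from a FIB lookup result:
 * classify the destination (unicast/broadcast/multicast), pick the
 * dst input/output handlers accordingly and fill in the flow keys
 * from the caller-supplied flowi.
 */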
static inline int __mkroute_output(struct rtable **result,
				   struct fib_result *res,
				   const struct flowi *fl,
				   const struct flowi *oldflp,
				   struct net_device *dev_out,
				   unsigned flags)
{
	struct rtable *rth;
	struct in_device *in_dev;
	u32 tos = RT_FL_TOS(oldflp);
	int err = 0;

	if (LOOPBACK(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
		return -EINVAL;

	if (fl->fl4_dst == htonl(0xFFFFFFFF))
		res->type = RTN_BROADCAST;
	else if (MULTICAST(fl->fl4_dst))
		res->type = RTN_MULTICAST;
	else if (BADCLASS(fl->fl4_dst) || ZERONET(fl->fl4_dst))
		return -EINVAL;

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	/* get work reference to inet device */
	in_dev = in_dev_get(dev_out);
	if (!in_dev)
		return -EINVAL;

	if (res->type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		if (res->fi) {
			fib_info_put(res->fi);
			res->fi = NULL;
		}
	} else if (res->type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
				 oldflp->proto))
			flags &= ~RTCF_LOCAL;
		/* If a multicast route does not exist, use the
		   default one, but do not gateway in this case.
		   Yes, it is a hack.
		 */
		if (res->fi && res->prefixlen < 4) {
			fib_info_put(res->fi);
			res->fi = NULL;
		}
	}

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags = DST_HOST;
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	if (res->fi) {
		rth->rt_multipath_alg = res->fi->fib_mp_alg;
		if (res->fi->fib_nhs > 1)
			rth->u.dst.flags |= DST_BALANCED;
	}
#endif
	if (in_dev->cnf.no_xfrm)
		rth->u.dst.flags |= DST_NOXFRM;
	if (in_dev->cnf.no_policy)
		rth->u.dst.flags |= DST_NOPOLICY;

	rth->fl.fl4_dst = oldflp->fl4_dst;
	rth->fl.fl4_tos = tos;
	rth->fl.fl4_src = oldflp->fl4_src;
	rth->fl.oif = oldflp->oif;
	rth->fl.mark = oldflp->mark;
	rth->rt_dst = fl->fl4_dst;
	rth->rt_src = fl->fl4_src;
	rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
	/* get references to the devices that are to be held by the routing
	   cache entry */
	rth->u.dst.dev = dev_out;
	dev_hold(dev_out);
	rth->idev = in_dev_get(dev_out);
	rth->rt_gateway = fl->fl4_dst;
	rth->rt_spec_dst = fl->fl4_src;

	rth->u.dst.output = ip_output;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL) {
		rth->u.dst.input = ip_local_deliver;
		rth->rt_spec_dst = fl->fl4_dst;
	}
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		rth->rt_spec_dst = fl->fl4_src;
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->u.dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (res->type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !LOCAL_MCAST(oldflp->fl4_dst)) {
				rth->u.dst.input = ip_mr_input;
				rth->u.dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, res, 0);

	rth->rt_flags = flags;

	*result = rth;
cleanup:
	/* release work reference to inet device */
	in_dev_put(in_dev);

	return err;
}

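/*
 * Default output variant: create a single cache entry with
 * __mkroute_output() and insert it into the hash table, returning the
 * interned route through *rp.
 */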
static inline int ip_mkroute_output_def(struct rtable **rp,
					struct fib_result *res,
					const struct flowi *fl,
					const struct flowi *oldflp,
					struct net_device *dev_out,
					unsigned flags)
{
	struct rtable *rth = NULL;
	int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
	unsigned hash;
	if (err == 0) {
		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
		err = rt_intern_hash(hash, rth, rp);
	}

	return err;
}

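/*
 * As on the input side, the CONFIG_IP_ROUTE_MULTIPATH_CACHED build
 * pre-creates a cache entry per next hop and registers each with the
 * multipath implementation; the plain build simply defers to
 * ip_mkroute_output_def().
 */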
static inline int ip_mkroute_output(struct rtable **rp,
				    struct fib_result *res,
				    const struct flowi *fl,
				    const struct flowi *oldflp,
				    struct net_device *dev_out,
				    unsigned flags)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	unsigned char hop;
	unsigned hash;
	int err = -EINVAL;
	struct rtable *rth = NULL;

	if (res->fi && res->fi->fib_nhs > 1) {
		unsigned char hopcount = res->fi->fib_nhs;

		for (hop = 0; hop < hopcount; hop++) {
			struct net_device *dev2nexthop;

			res->nh_sel = hop;

			/* hold a work reference to the output device */
			dev2nexthop = FIB_RES_DEV(*res);
			dev_hold(dev2nexthop);

			/* put reference to previous result */
			if (hop)
				ip_rt_put(*rp);

			err = __mkroute_output(&rth, res, fl, oldflp,
					       dev2nexthop, flags);

			if (err != 0)
				goto cleanup;

			hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src,
				       oldflp->oif);
			err = rt_intern_hash(hash, rth, rp);

			/* forward hop information to multipath impl. */
			multipath_set_nhinfo(rth,
					     FIB_RES_NETWORK(*res),
					     FIB_RES_NETMASK(*res),
					     res->prefixlen,
					     &FIB_RES_NH(*res));
cleanup:
			/* release work reference to output device */
			dev_put(dev2nexthop);

			if (err != 0)
				return err;
		}
		return err;
	} else {
		return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out,
					     flags);
	}
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
	return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out, flags);
#endif
}

/*
 * Major route resolver routine.
 */

static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
{
	u32 tos = RT_FL_TOS(oldflp);
	struct flowi fl = { .nl_u = { .ip4_u =
				      { .daddr = oldflp->fl4_dst,
					.saddr = oldflp->fl4_src,
					.tos = tos & IPTOS_RT_MASK,
					.scope = ((tos & RTO_ONLINK) ?
						  RT_SCOPE_LINK :
						  RT_SCOPE_UNIVERSE),
				      } },
			    .mark = oldflp->mark,
			    .iif = loopback_dev.ifindex,
			    .oif = oldflp->oif };
	struct fib_result res;
	unsigned flags = 0;
	struct net_device *dev_out = NULL;
	int free_res = 0;
	int err;

	res.fi = NULL;
#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r = NULL;
#endif

	if (oldflp->fl4_src) {
		err = -EINVAL;
		if (MULTICAST(oldflp->fl4_src) ||
		    BADCLASS(oldflp->fl4_src) ||
		    ZERONET(oldflp->fl4_src))
			goto out;

		/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
		dev_out = ip_dev_find(oldflp->fl4_src);
		if (dev_out == NULL && !sysctl_ip_nonlocal_bind)
			goto out;

		/* I removed the check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(saddr) can return the wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (dev_out && oldflp->oif == 0
		    && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
			/* Special hack: user can direct multicasts
			   and limited broadcast via the necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind a socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of the routing cache they are
			   broken, because we are not allowed to build a
			   multicast path with a loopback source addr (look,
			   the routing cache cannot know that ttl is zero, so
			   that the packet will not leave this host and the
			   route is valid).
			   Luckily, this hack is a good workaround.
			 */

			fl.oif = dev_out->ifindex;
			goto make_route;
		}
		if (dev_out)
			dev_put(dev_out);
		dev_out = NULL;
	}

	if (oldflp->oif) {
		dev_out = dev_get_by_index(oldflp->oif);
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (__in_dev_get_rtnl(dev_out) == NULL) {
			dev_put(dev_out);
			goto out;	/* Wrong error code */
		}

		if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
			if (!fl.fl4_src)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl.fl4_src) {
			if (MULTICAST(oldflp->fl4_dst))
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      fl.fl4_scope);
			else if (!oldflp->fl4_dst)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl.fl4_dst) {
		fl.fl4_dst = fl.fl4_src;
		if (!fl.fl4_dst)
			fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
		if (dev_out)
			dev_put(dev_out);
		dev_out = &loopback_dev;
		dev_hold(dev_out);
		fl.oif = loopback_dev.ifindex;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(&fl, &res)) {
		res.fi = NULL;
		if (oldflp->oif) {
			/* Apparently, the routing tables are wrong. Assume
			   that the destination is on-link.

			   WHY? DW.
			   Because we are allowed to send to an iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, the routing
			   tables are looked up with only one purpose:
			   to catch if the destination is gatewayed, rather
			   than direct. Moreover, if MSG_DONTROUTE is set,
			   we send a packet, ignoring both the routing tables
			   and the ifaddr state. --ANK


			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl.fl4_src == 0)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		if (dev_out)
			dev_put(dev_out);
		err = -ENETUNREACH;
		goto out;
	}
	free_res = 1;

	if (res.type == RTN_LOCAL) {
		if (!fl.fl4_src)
			fl.fl4_src = fl.fl4_dst;
		if (dev_out)
			dev_put(dev_out);
		dev_out = &loopback_dev;
		dev_hold(dev_out);
		fl.oif = dev_out->ifindex;
		if (res.fi)
			fib_info_put(res.fi);
		res.fi = NULL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl.oif == 0)
		fib_select_multipath(&fl, &res);
	else
#endif
	if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
		fib_select_default(&fl, &res);

	if (!fl.fl4_src)
		fl.fl4_src = FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = FIB_RES_DEV(res);
	dev_hold(dev_out);
	fl.oif = dev_out->ifindex;

make_route:
	err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);

	if (free_res)
		fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:	return err;
}

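/*
 * Output fast path: search the route cache under rcu_read_lock_bh()
 * and fall back to ip_route_output_slow() on a miss. The TOS
 * comparison masks out everything except IPTOS_RT_MASK and the
 * RTO_ONLINK hint.
 */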
int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
{
	unsigned hash;
	struct rtable *rth;

	hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif);

	rcu_read_lock_bh();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (rth->fl.fl4_dst == flp->fl4_dst &&
		    rth->fl.fl4_src == flp->fl4_src &&
		    rth->fl.iif == 0 &&
		    rth->fl.oif == flp->oif &&
		    rth->fl.mark == flp->mark &&
		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
		      (IPTOS_RT_MASK | RTO_ONLINK))) {

			/* check for multipath routes and choose one if
			 * necessary
			 */
			if (multipath_select_route(flp, rth, rp)) {
				dst_hold(&(*rp)->u.dst);
				RT_CACHE_STAT_INC(out_hit);
				rcu_read_unlock_bh();
				return 0;
			}

			rth->u.dst.lastuse = jiffies;
			dst_hold(&rth->u.dst);
			rth->u.dst.__use++;
			RT_CACHE_STAT_INC(out_hit);
			rcu_read_unlock_bh();
			*rp = rth;
			return 0;
		}
		RT_CACHE_STAT_INC(out_hlist_search);
	}
	rcu_read_unlock_bh();

	return ip_route_output_slow(rp, flp);
}

EXPORT_SYMBOL_GPL(__ip_route_output_key);

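/*
 * Resolve an output route and, when the caller supplied a transport
 * protocol, fill in any missing flow addresses from the result and run
 * the route through xfrm_lookup() so IPsec policy can transform or
 * reject it.
 */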
int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags)
{
	int err;

	if ((err = __ip_route_output_key(rp, flp)) != 0)
		return err;

	if (flp->proto) {
		if (!flp->fl4_src)
			flp->fl4_src = (*rp)->rt_src;
		if (!flp->fl4_dst)
			flp->fl4_dst = (*rp)->rt_dst;
		return xfrm_lookup((struct dst_entry **)rp, flp, sk, flags);
	}

	return 0;
}

EXPORT_SYMBOL_GPL(ip_route_output_flow);

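/*
 * Typical in-kernel usage is to fill a flowi key and let the resolver
 * return a held rtable. A minimal sketch (error handling trimmed;
 * 'dip' and 'sip' are placeholder addresses):
 *
 *	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dip,
 *						 .saddr = sip } } };
 *	struct rtable *rt;
 *
 *	if (ip_route_output_key(&rt, &fl))
 *		return -EHOSTUNREACH;
 *	...				// use rt->u.dst
 *	ip_rt_put(rt);			// drop the reference when done
 */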
int ip_route_output_key(struct rtable **rp, struct flowi *flp)
{
	return ip_route_output_flow(rp, flp, NULL, 0);
}

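/*
 * Translate one cached route into an RTM_NEWROUTE netlink message.
 * Returns the message length on success, 0 when a pending ipmr lookup
 * will answer later, or -EMSGSIZE when the skb ran out of room
 * (nlmsg_cancel() undoes the partial message).
 */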
static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
			int nowait, unsigned int flags)
{
	struct rtable *rt = (struct rtable *)skb->dst;
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires;
	u32 id = 0, ts = 0, tsage = 0, error;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family = AF_INET;
	r->rtm_dst_len = 32;
	r->rtm_src_len = 0;
	r->rtm_tos = rt->fl.fl4_tos;
	r->rtm_table = RT_TABLE_MAIN;
	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type = rt->rt_type;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);

	if (rt->fl.fl4_src) {
		r->rtm_src_len = 32;
		NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
	}
	if (rt->u.dst.dev)
		NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
#ifdef CONFIG_NET_CLS_ROUTE
	if (rt->u.dst.tclassid)
		NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	if (rt->rt_multipath_alg != IP_MP_ALG_NONE)
		NLA_PUT_U32(skb, RTA_MP_ALGO, rt->rt_multipath_alg);
#endif
	if (rt->fl.iif)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
	else if (rt->rt_src != rt->fl.fl4_src)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);

	if (rt->rt_dst != rt->rt_gateway)
		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);

	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto nla_put_failure;

	error = rt->u.dst.error;
	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
	if (rt->peer) {
		id = rt->peer->ip_id_count;
		if (rt->peer->tcp_ts_stamp) {
			ts = rt->peer->tcp_ts;
			tsage = get_seconds() - rt->peer->tcp_ts_stamp;
		}
	}

	if (rt->fl.iif) {
#ifdef CONFIG_IP_MROUTE
		__be32 dst = rt->rt_dst;

		if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
		    ipv4_devconf.mc_forwarding) {
			int err = ipmr_get_route(skb, r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
	}

	if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
			       expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

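/*
 * RTM_GETROUTE handler: build a dummy skb, resolve the route either
 * through ip_route_input() (when RTA_IIF is given) or through
 * ip_route_output_key(), and unicast the rt_fill_info() result back
 * to the requesting socket.
 */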
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers; this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol = htons(ETH_P_IP);
		skb->dev = dev;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = (struct rtable *)skb->dst;
		if (err == 0 && rt->u.dst.error)
			err = -rt->u.dst.error;
	} else {
		struct flowi fl = {
			.nl_u = {
				.ip4_u = {
					.daddr = dst,
					.saddr = src,
					.tos = rtm->rtm_tos,
				},
			},
			.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
		};
		err = ip_route_output_key(&rt, &fl);
	}

	if (err)
		goto errout_free;

	skb->dst = &rt->u.dst;
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}

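/*
 * Dump the whole route cache to netlink, resuming from the hash bucket
 * and chain position saved in cb->args[] across partial dumps.
 */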
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtable *rt;
	int h, s_h;
	int idx, s_idx;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	for (h = 0; h <= rt_hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		rcu_read_lock_bh();
		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
			if (idx < s_idx)
				continue;
			skb->dst = dst_clone(&rt->u.dst);
			if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					 1, NLM_F_MULTI) <= 0) {
				dst_release(xchg(&skb->dst, NULL));
				rcu_read_unlock_bh();
				goto done;
			}
			dst_release(xchg(&skb->dst, NULL));
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}

void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(0);
}

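/*
 * The "flush" sysctl below is write-only: writing an integer delay
 * triggers rt_cache_flush(), and writing 0 flushes the whole route
 * cache immediately. From userspace this is simply, e.g.:
 *
 *	echo 0 > /proc/sys/net/ipv4/route/flush
 */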
#ifdef CONFIG_SYSCTL
static int flush_delay;

static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
				     struct file *filp, void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	if (write) {
		proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
		rt_cache_flush(flush_delay);
		return 0;
	}

	return -EINVAL;
}

static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
					      int __user *name,
					      int nlen,
					      void __user *oldval,
					      size_t __user *oldlenp,
					      void __user *newval,
					      size_t newlen)
{
	int delay;
	if (newlen != sizeof(int))
		return -EINVAL;
	if (get_user(delay, (int __user *)newval))
		return -EFAULT;
	rt_cache_flush(delay);
	return 0;
}

ctl_table ipv4_route_table[] = {
	{
		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
		.procname	= "flush",
		.data		= &flush_delay,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= &ipv4_sysctl_rtcache_flush,
		.strategy	= &ipv4_sysctl_rtcache_flush_strategy,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_DELAY,
		.procname	= "min_delay",
		.data		= &ip_rt_min_delay,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MAX_DELAY,
		.procname	= "max_delay",
		.data		= &ip_rt_max_delay,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_THRESH,
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MAX_SIZE,
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL,
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_ms_jiffies,
		.strategy	= &sysctl_ms_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_TIMEOUT,
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_INTERVAL,
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_LOAD,
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_NUMBER,
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_SILENCE,
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_COST,
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_BURST,
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_ELASTICITY,
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MTU_EXPIRES,
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_PMTU,
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_ADVMSS,
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_SECRET_INTERVAL,
		.procname	= "secret_interval",
		.data		= &ip_rt_secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{ .ctl_name = 0 }
};
#endif

#ifdef CONFIG_NET_CLS_ROUTE
struct ip_rt_acct *ip_rt_acct;

/* This code sucks. But you should have seen it before! --RR */

/* IP route accounting ptr for this logical cpu number. */
#define IP_RT_ACCT_CPU(i) (ip_rt_acct + i * 256)

#ifdef CONFIG_PROC_FS
static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
			   int length, int *eof, void *data)
{
	unsigned int i;

	if ((offset & 3) || (length & 3))
		return -EIO;

	if (offset >= sizeof(struct ip_rt_acct) * 256) {
		*eof = 1;
		return 0;
	}

	if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
		length = sizeof(struct ip_rt_acct) * 256 - offset;
		*eof = 1;
	}

	offset /= sizeof(u32);

	if (length > 0) {
		u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset;
		u32 *dst = (u32 *) buffer;

		/* Copy first cpu. */
		*start = buffer;
		memcpy(dst, src, length);

		/* Add the other cpus in, one int at a time */
		for_each_possible_cpu(i) {
			unsigned int j;

			src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;

			for (j = 0; j < length/4; j++)
				dst[j] += src[j];
		}
	}
	return length;
}
#endif /* CONFIG_PROC_FS */
#endif /* CONFIG_NET_CLS_ROUTE */

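/*
 * "rhash_entries=N" on the kernel command line overrides the route
 * cache hash table size otherwise computed in ip_rt_init() below,
 * e.g. booting with "rhash_entries=65536".
 */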
static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
	if (!str)
		return 0;
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);

int __init ip_rt_init(void)
{
	int rc = 0;

	rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^
			     (jiffies ^ (jiffies >> 7)));

#ifdef CONFIG_NET_CLS_ROUTE
	{
	int order;
	for (order = 0;
	     (PAGE_SIZE << order) < 256 * sizeof(struct ip_rt_acct) * NR_CPUS; order++)
		/* NOTHING */;
	ip_rt_acct = (struct ip_rt_acct *)__get_free_pages(GFP_KERNEL, order);
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
	memset(ip_rt_acct, 0, PAGE_SIZE << order);
	}
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(num_physpages >= 128 * 1024) ?
					15 : 17,
					0,
					&rt_hash_log,
					&rt_hash_mask,
					0);
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
	rt_hash_lock_init();

	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
	ip_rt_max_size = (rt_hash_mask + 1) * 16;

	devinet_init();
	ip_fib_init();

	init_timer(&rt_flush_timer);
	rt_flush_timer.function = rt_run_flush;
	init_timer(&rt_periodic_timer);
	rt_periodic_timer.function = rt_check_expire;
	init_timer(&rt_secret_timer);
	rt_secret_timer.function = rt_secret_rebuild;

	/* All the timers, started at system startup, tend
	   to synchronize. Perturb it a bit.
	 */
	rt_periodic_timer.expires = jiffies + net_random() % ip_rt_gc_interval +
					ip_rt_gc_interval;
	add_timer(&rt_periodic_timer);

	rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
		ip_rt_secret_interval;
	add_timer(&rt_secret_timer);

#ifdef CONFIG_PROC_FS
	{
	struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
	if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
	    !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
					     proc_net_stat))) {
		return -ENOMEM;
	}
	rtstat_pde->proc_fops = &rt_cpu_seq_fops;
	}
#endif
#ifdef CONFIG_NET_CLS_ROUTE
	create_proc_read_entry("rt_acct", 0, proc_net, ip_rt_acct_read, NULL);
#endif
#endif
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);

	return rc;
}

EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_route_input);
EXPORT_SYMBOL(ip_route_output_key);