blob: 155138d8ec8bb9d7ef0e907b36b0725e0910394d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * ROUTE - implementation of the IP router.
7 *
Jesper Juhl02c30a82005-05-05 16:16:16 -07008 * Authors: Ross Biro
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13 *
14 * Fixes:
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +090021 * Alan Cox : Super /proc >4K
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
24 * clamper.
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +090039 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070040 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
Eric Dumazetbb1d23b2005-07-05 15:00:32 -070055 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
Ilia Sotnikovcef26852006-03-25 01:38:55 -080056 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
Linus Torvalds1da177e2005-04-16 15:20:36 -070058 *
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
63 */
64
Linus Torvalds1da177e2005-04-16 15:20:36 -070065#include <linux/module.h>
66#include <asm/uaccess.h>
67#include <asm/system.h>
68#include <linux/bitops.h>
69#include <linux/types.h>
70#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070071#include <linux/mm.h>
Eric Dumazet424c4b72005-07-05 14:58:19 -070072#include <linux/bootmem.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070073#include <linux/string.h>
74#include <linux/socket.h>
75#include <linux/sockios.h>
76#include <linux/errno.h>
77#include <linux/in.h>
78#include <linux/inet.h>
79#include <linux/netdevice.h>
80#include <linux/proc_fs.h>
81#include <linux/init.h>
Eric Dumazet39c90ec2007-09-15 10:55:54 -070082#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070083#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070084#include <linux/inetdevice.h>
85#include <linux/igmp.h>
86#include <linux/pkt_sched.h>
87#include <linux/mroute.h>
88#include <linux/netfilter_ipv4.h>
89#include <linux/random.h>
90#include <linux/jhash.h>
91#include <linux/rcupdate.h>
92#include <linux/times.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090093#include <linux/slab.h>
Herbert Xu352e5122007-11-13 21:34:06 -080094#include <net/dst.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020095#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070096#include <net/protocol.h>
97#include <net/ip.h>
98#include <net/route.h>
99#include <net/inetpeer.h>
100#include <net/sock.h>
101#include <net/ip_fib.h>
102#include <net/arp.h>
103#include <net/tcp.h>
104#include <net/icmp.h>
105#include <net/xfrm.h>
Tom Tucker8d717402006-07-30 20:43:36 -0700106#include <net/netevent.h>
Thomas Graf63f34442007-03-22 11:55:17 -0700107#include <net/rtnetlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108#ifdef CONFIG_SYSCTL
109#include <linux/sysctl.h>
110#endif
David Miller3769cff2011-07-11 22:44:24 +0000111#include <net/atmclip.h>
David S. Miller6e5714e2011-08-03 20:50:44 -0700112#include <net/secure_seq.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113
David S. Miller68a5e3d2011-03-11 20:07:33 -0500114#define RT_FL_TOS(oldflp4) \
115 ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116
117#define IP_MAX_MTU 0xFFF0
118
119#define RT_GC_TIMEOUT (300*HZ)
120
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121static int ip_rt_max_size;
Stephen Hemminger817bc4d2008-03-22 17:43:59 -0700122static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
Stephen Hemminger817bc4d2008-03-22 17:43:59 -0700123static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
124static int ip_rt_redirect_number __read_mostly = 9;
125static int ip_rt_redirect_load __read_mostly = HZ / 50;
126static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
127static int ip_rt_error_cost __read_mostly = HZ;
128static int ip_rt_error_burst __read_mostly = 5 * HZ;
129static int ip_rt_gc_elasticity __read_mostly = 8;
130static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
131static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
132static int ip_rt_min_advmss __read_mostly = 256;
Neil Horman1080d702008-10-27 12:28:25 -0700133static int rt_chain_length_max __read_mostly = 20;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135/*
136 * Interface to generic destination cache.
137 */
138
139static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
David S. Miller0dbaee32010-12-13 12:52:14 -0800140static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
David S. Millerd33e4552010-12-14 13:01:14 -0800141static unsigned int ipv4_default_mtu(const struct dst_entry *dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142static void ipv4_dst_destroy(struct dst_entry *dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
144static void ipv4_link_failure(struct sk_buff *skb);
145static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
Daniel Lezcano569d3642008-01-18 03:56:57 -0800146static int rt_garbage_collect(struct dst_ops *ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147
/* No per-route work is needed when a device goes down; the route cache
 * is flushed elsewhere, so this dst_ops hook is intentionally empty. */
static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700152
David S. Miller62fa8a82011-01-26 20:51:05 -0800153static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
154{
David S. Miller06582542011-01-27 14:58:42 -0800155 struct rtable *rt = (struct rtable *) dst;
156 struct inet_peer *peer;
157 u32 *p = NULL;
David S. Miller62fa8a82011-01-26 20:51:05 -0800158
David S. Miller06582542011-01-27 14:58:42 -0800159 if (!rt->peer)
David S. Millera48eff12011-05-18 18:42:43 -0400160 rt_bind_peer(rt, rt->rt_dst, 1);
David S. Miller06582542011-01-27 14:58:42 -0800161
162 peer = rt->peer;
163 if (peer) {
David S. Miller62fa8a82011-01-26 20:51:05 -0800164 u32 *old_p = __DST_METRICS_PTR(old);
165 unsigned long prev, new;
166
David S. Miller06582542011-01-27 14:58:42 -0800167 p = peer->metrics;
168 if (inet_metrics_new(peer))
169 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
David S. Miller62fa8a82011-01-26 20:51:05 -0800170
171 new = (unsigned long) p;
172 prev = cmpxchg(&dst->_metrics, old, new);
173
174 if (prev != old) {
David S. Miller62fa8a82011-01-26 20:51:05 -0800175 p = __DST_METRICS_PTR(prev);
176 if (prev & DST_METRICS_READ_ONLY)
177 p = NULL;
178 } else {
David S. Miller62fa8a82011-01-26 20:51:05 -0800179 if (rt->fi) {
180 fib_info_put(rt->fi);
181 rt->fi = NULL;
182 }
183 }
184 }
185 return p;
186}
187
David S. Millerd3aaeb32011-07-18 00:40:17 -0700188static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr);
189
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190static struct dst_ops ipv4_dst_ops = {
191 .family = AF_INET,
Harvey Harrison09640e62009-02-01 00:45:17 -0800192 .protocol = cpu_to_be16(ETH_P_IP),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193 .gc = rt_garbage_collect,
194 .check = ipv4_dst_check,
David S. Miller0dbaee32010-12-13 12:52:14 -0800195 .default_advmss = ipv4_default_advmss,
David S. Millerd33e4552010-12-14 13:01:14 -0800196 .default_mtu = ipv4_default_mtu,
David S. Miller62fa8a82011-01-26 20:51:05 -0800197 .cow_metrics = ipv4_cow_metrics,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 .destroy = ipv4_dst_destroy,
199 .ifdown = ipv4_dst_ifdown,
200 .negative_advice = ipv4_negative_advice,
201 .link_failure = ipv4_link_failure,
202 .update_pmtu = ip_rt_update_pmtu,
Herbert Xu1ac06e02008-05-20 14:32:14 -0700203 .local_out = __ip_local_out,
David S. Millerd3aaeb32011-07-18 00:40:17 -0700204 .neigh_lookup = ipv4_neigh_lookup,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205};
206
207#define ECN_OR_COST(class) TC_PRIO_##class
208
Philippe De Muyter4839c522007-07-09 15:32:57 -0700209const __u8 ip_tos2prio[16] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700210 TC_PRIO_BESTEFFORT,
Dan Siemon4a2b9c32011-03-15 13:56:07 +0000211 ECN_OR_COST(BESTEFFORT),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 TC_PRIO_BESTEFFORT,
213 ECN_OR_COST(BESTEFFORT),
214 TC_PRIO_BULK,
215 ECN_OR_COST(BULK),
216 TC_PRIO_BULK,
217 ECN_OR_COST(BULK),
218 TC_PRIO_INTERACTIVE,
219 ECN_OR_COST(INTERACTIVE),
220 TC_PRIO_INTERACTIVE,
221 ECN_OR_COST(INTERACTIVE),
222 TC_PRIO_INTERACTIVE_BULK,
223 ECN_OR_COST(INTERACTIVE_BULK),
224 TC_PRIO_INTERACTIVE_BULK,
225 ECN_OR_COST(INTERACTIVE_BULK)
226};
227
228
229/*
230 * Route cache.
231 */
232
233/* The locking scheme is rather straight forward:
234 *
235 * 1) Read-Copy Update protects the buckets of the central route hash.
236 * 2) Only writers remove entries, and they hold the lock
237 * as they look at rtable reference counts.
238 * 3) Only readers acquire references to rtable entries,
239 * they do so with atomic increments and with the
240 * lock held.
241 */
242
243struct rt_hash_bucket {
Eric Dumazet1c317202010-10-25 21:02:07 +0000244 struct rtable __rcu *chain;
Eric Dumazet22c047c2005-07-05 14:55:24 -0700245};
Neil Horman1080d702008-10-27 12:28:25 -0700246
Ingo Molnar8a25d5d2006-07-03 00:24:54 -0700247#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
248 defined(CONFIG_PROVE_LOCKING)
Eric Dumazet22c047c2005-07-05 14:55:24 -0700249/*
250 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
251 * The size of this table is a power of two and depends on the number of CPUS.
Ingo Molnar62051202006-07-03 00:24:59 -0700252 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
Eric Dumazet22c047c2005-07-05 14:55:24 -0700253 */
Ingo Molnar62051202006-07-03 00:24:59 -0700254#ifdef CONFIG_LOCKDEP
255# define RT_HASH_LOCK_SZ 256
Eric Dumazet22c047c2005-07-05 14:55:24 -0700256#else
Ingo Molnar62051202006-07-03 00:24:59 -0700257# if NR_CPUS >= 32
258# define RT_HASH_LOCK_SZ 4096
259# elif NR_CPUS >= 16
260# define RT_HASH_LOCK_SZ 2048
261# elif NR_CPUS >= 8
262# define RT_HASH_LOCK_SZ 1024
263# elif NR_CPUS >= 4
264# define RT_HASH_LOCK_SZ 512
265# else
266# define RT_HASH_LOCK_SZ 256
267# endif
Eric Dumazet22c047c2005-07-05 14:55:24 -0700268#endif
269
270static spinlock_t *rt_hash_locks;
271# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
Pavel Emelyanov1ff1cc22007-12-05 21:15:05 -0800272
273static __init void rt_hash_lock_init(void)
274{
275 int i;
276
277 rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
278 GFP_KERNEL);
279 if (!rt_hash_locks)
280 panic("IP: failed to allocate rt_hash_locks\n");
281
282 for (i = 0; i < RT_HASH_LOCK_SZ; i++)
283 spin_lock_init(&rt_hash_locks[i]);
284}
Eric Dumazet22c047c2005-07-05 14:55:24 -0700285#else
286# define rt_hash_lock_addr(slot) NULL
Pavel Emelyanov1ff1cc22007-12-05 21:15:05 -0800287
/* UP / no-lockdep build: no bucket locks exist, nothing to set up. */
static inline void rt_hash_lock_init(void)
{
}
Eric Dumazet22c047c2005-07-05 14:55:24 -0700291#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700292
Stephen Hemminger817bc4d2008-03-22 17:43:59 -0700293static struct rt_hash_bucket *rt_hash_table __read_mostly;
294static unsigned rt_hash_mask __read_mostly;
295static unsigned int rt_hash_log __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296
Eric Dumazet2f970d82006-01-17 02:54:36 -0800297static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
Eric Dumazet27f39c73e2010-05-19 22:07:23 +0000298#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299
Denis V. Lunevb00180d2008-07-05 19:04:09 -0700300static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
Eric Dumazet0eae88f2010-04-20 19:06:52 -0700301 int genid)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302{
Eric Dumazet0eae88f2010-04-20 19:06:52 -0700303 return jhash_3words((__force u32)daddr, (__force u32)saddr,
Denis V. Lunevb00180d2008-07-05 19:04:09 -0700304 idx, genid)
Eric Dumazet29e75252008-01-31 17:05:09 -0800305 & rt_hash_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306}
307
Denis V. Luneve84f84f2008-07-05 19:04:32 -0700308static inline int rt_genid(struct net *net)
309{
310 return atomic_read(&net->ipv4.rt_genid);
311}
312
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313#ifdef CONFIG_PROC_FS
314struct rt_cache_iter_state {
Denis V. Luneva75e9362008-02-28 20:50:55 -0800315 struct seq_net_private p;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316 int bucket;
Eric Dumazet29e75252008-01-31 17:05:09 -0800317 int genid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318};
319
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +0900320static struct rtable *rt_cache_get_first(struct seq_file *seq)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321{
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +0900322 struct rt_cache_iter_state *st = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323 struct rtable *r = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324
325 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
Eric Dumazet33d480c2011-08-11 19:30:52 +0000326 if (!rcu_access_pointer(rt_hash_table[st->bucket].chain))
Eric Dumazeta6272662008-08-28 01:11:25 -0700327 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328 rcu_read_lock_bh();
Paul E. McKenneya898def2010-02-22 17:04:49 -0800329 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
Eric Dumazet29e75252008-01-31 17:05:09 -0800330 while (r) {
Changli Gaod8d1f302010-06-10 23:31:35 -0700331 if (dev_net(r->dst.dev) == seq_file_net(seq) &&
Denis V. Luneva75e9362008-02-28 20:50:55 -0800332 r->rt_genid == st->genid)
Eric Dumazet29e75252008-01-31 17:05:09 -0800333 return r;
Changli Gaod8d1f302010-06-10 23:31:35 -0700334 r = rcu_dereference_bh(r->dst.rt_next);
Eric Dumazet29e75252008-01-31 17:05:09 -0800335 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 rcu_read_unlock_bh();
337 }
Eric Dumazet29e75252008-01-31 17:05:09 -0800338 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339}
340
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +0900341static struct rtable *__rt_cache_get_next(struct seq_file *seq,
Denis V. Lunev642d6312008-02-28 20:50:33 -0800342 struct rtable *r)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343{
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +0900344 struct rt_cache_iter_state *st = seq->private;
Eric Dumazeta6272662008-08-28 01:11:25 -0700345
Eric Dumazet1c317202010-10-25 21:02:07 +0000346 r = rcu_dereference_bh(r->dst.rt_next);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347 while (!r) {
348 rcu_read_unlock_bh();
Eric Dumazeta6272662008-08-28 01:11:25 -0700349 do {
350 if (--st->bucket < 0)
351 return NULL;
Eric Dumazet33d480c2011-08-11 19:30:52 +0000352 } while (!rcu_access_pointer(rt_hash_table[st->bucket].chain));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353 rcu_read_lock_bh();
Eric Dumazet1c317202010-10-25 21:02:07 +0000354 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700355 }
Eric Dumazet1c317202010-10-25 21:02:07 +0000356 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357}
358
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +0900359static struct rtable *rt_cache_get_next(struct seq_file *seq,
Denis V. Lunev642d6312008-02-28 20:50:33 -0800360 struct rtable *r)
361{
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +0900362 struct rt_cache_iter_state *st = seq->private;
363 while ((r = __rt_cache_get_next(seq, r)) != NULL) {
Changli Gaod8d1f302010-06-10 23:31:35 -0700364 if (dev_net(r->dst.dev) != seq_file_net(seq))
Denis V. Luneva75e9362008-02-28 20:50:55 -0800365 continue;
Denis V. Lunev642d6312008-02-28 20:50:33 -0800366 if (r->rt_genid == st->genid)
367 break;
368 }
369 return r;
370}
371
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +0900372static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373{
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +0900374 struct rtable *r = rt_cache_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375
376 if (r)
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +0900377 while (pos && (r = rt_cache_get_next(seq, r)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 --pos;
379 return pos ? NULL : r;
380}
381
382static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
383{
Eric Dumazet29e75252008-01-31 17:05:09 -0800384 struct rt_cache_iter_state *st = seq->private;
Eric Dumazet29e75252008-01-31 17:05:09 -0800385 if (*pos)
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +0900386 return rt_cache_get_idx(seq, *pos - 1);
Denis V. Luneve84f84f2008-07-05 19:04:32 -0700387 st->genid = rt_genid(seq_file_net(seq));
Eric Dumazet29e75252008-01-31 17:05:09 -0800388 return SEQ_START_TOKEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389}
390
391static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
392{
Eric Dumazet29e75252008-01-31 17:05:09 -0800393 struct rtable *r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394
395 if (v == SEQ_START_TOKEN)
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +0900396 r = rt_cache_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397 else
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +0900398 r = rt_cache_get_next(seq, v);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700399 ++*pos;
400 return r;
401}
402
403static void rt_cache_seq_stop(struct seq_file *seq, void *v)
404{
405 if (v && v != SEQ_START_TOKEN)
406 rcu_read_unlock_bh();
407}
408
409static int rt_cache_seq_show(struct seq_file *seq, void *v)
410{
411 if (v == SEQ_START_TOKEN)
412 seq_printf(seq, "%-127s\n",
413 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
414 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
415 "HHUptod\tSpecDst");
416 else {
417 struct rtable *r = v;
David S. Miller69cce1d2011-07-17 23:09:49 -0700418 struct neighbour *n;
Pavel Emelyanov5e659e42008-04-24 01:02:16 -0700419 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420
David S. Miller69cce1d2011-07-17 23:09:49 -0700421 n = dst_get_neighbour(&r->dst);
Eric Dumazet0eae88f2010-04-20 19:06:52 -0700422 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
423 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
Changli Gaod8d1f302010-06-10 23:31:35 -0700424 r->dst.dev ? r->dst.dev->name : "*",
Eric Dumazet0eae88f2010-04-20 19:06:52 -0700425 (__force u32)r->rt_dst,
426 (__force u32)r->rt_gateway,
Changli Gaod8d1f302010-06-10 23:31:35 -0700427 r->rt_flags, atomic_read(&r->dst.__refcnt),
428 r->dst.__use, 0, (__force u32)r->rt_src,
David S. Miller0dbaee32010-12-13 12:52:14 -0800429 dst_metric_advmss(&r->dst) + 40,
Changli Gaod8d1f302010-06-10 23:31:35 -0700430 dst_metric(&r->dst, RTAX_WINDOW),
431 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
432 dst_metric(&r->dst, RTAX_RTTVAR)),
David S. Miller475949d2011-05-03 19:45:15 -0700433 r->rt_key_tos,
David S. Millerf6b72b62011-07-14 07:53:20 -0700434 -1,
David S. Miller69cce1d2011-07-17 23:09:49 -0700435 (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
Pavel Emelyanov5e659e42008-04-24 01:02:16 -0700436 r->rt_spec_dst, &len);
437
438 seq_printf(seq, "%*s\n", 127 - len, "");
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900439 }
440 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700441}
442
Stephen Hemmingerf6908082007-03-12 14:34:29 -0700443static const struct seq_operations rt_cache_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700444 .start = rt_cache_seq_start,
445 .next = rt_cache_seq_next,
446 .stop = rt_cache_seq_stop,
447 .show = rt_cache_seq_show,
448};
449
450static int rt_cache_seq_open(struct inode *inode, struct file *file)
451{
Denis V. Luneva75e9362008-02-28 20:50:55 -0800452 return seq_open_net(inode, file, &rt_cache_seq_ops,
Pavel Emelyanovcf7732e2007-10-10 02:29:29 -0700453 sizeof(struct rt_cache_iter_state));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700454}
455
Arjan van de Ven9a321442007-02-12 00:55:35 -0800456static const struct file_operations rt_cache_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700457 .owner = THIS_MODULE,
458 .open = rt_cache_seq_open,
459 .read = seq_read,
460 .llseek = seq_lseek,
Denis V. Luneva75e9362008-02-28 20:50:55 -0800461 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462};
463
464
465static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
466{
467 int cpu;
468
469 if (*pos == 0)
470 return SEQ_START_TOKEN;
471
Rusty Russell0f23174a2008-12-29 12:23:42 +0000472 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473 if (!cpu_possible(cpu))
474 continue;
475 *pos = cpu+1;
Eric Dumazet2f970d82006-01-17 02:54:36 -0800476 return &per_cpu(rt_cache_stat, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700477 }
478 return NULL;
479}
480
481static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
482{
483 int cpu;
484
Rusty Russell0f23174a2008-12-29 12:23:42 +0000485 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486 if (!cpu_possible(cpu))
487 continue;
488 *pos = cpu+1;
Eric Dumazet2f970d82006-01-17 02:54:36 -0800489 return &per_cpu(rt_cache_stat, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700490 }
491 return NULL;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900492
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493}
494
/* seq_file .stop: nothing to release for the per-CPU stats walk. */
static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}
499
500static int rt_cpu_seq_show(struct seq_file *seq, void *v)
501{
502 struct rt_cache_stat *st = v;
503
504 if (v == SEQ_START_TOKEN) {
Olaf Rempel5bec0032005-04-28 12:16:08 -0700505 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506 return 0;
507 }
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900508
Linus Torvalds1da177e2005-04-16 15:20:36 -0700509 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
510 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
Eric Dumazetfc66f952010-10-08 06:37:34 +0000511 dst_entries_get_slow(&ipv4_dst_ops),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512 st->in_hit,
513 st->in_slow_tot,
514 st->in_slow_mc,
515 st->in_no_route,
516 st->in_brd,
517 st->in_martian_dst,
518 st->in_martian_src,
519
520 st->out_hit,
521 st->out_slow_tot,
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900522 st->out_slow_mc,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523
524 st->gc_total,
525 st->gc_ignored,
526 st->gc_goal_miss,
527 st->gc_dst_overflow,
528 st->in_hlist_search,
529 st->out_hlist_search
530 );
531 return 0;
532}
533
Stephen Hemmingerf6908082007-03-12 14:34:29 -0700534static const struct seq_operations rt_cpu_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700535 .start = rt_cpu_seq_start,
536 .next = rt_cpu_seq_next,
537 .stop = rt_cpu_seq_stop,
538 .show = rt_cpu_seq_show,
539};
540
541
542static int rt_cpu_seq_open(struct inode *inode, struct file *file)
543{
544 return seq_open(file, &rt_cpu_seq_ops);
545}
546
Arjan van de Ven9a321442007-02-12 00:55:35 -0800547static const struct file_operations rt_cpu_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 .owner = THIS_MODULE,
549 .open = rt_cpu_seq_open,
550 .read = seq_read,
551 .llseek = seq_lseek,
552 .release = seq_release,
553};
554
Patrick McHardyc7066f72011-01-14 13:36:42 +0100555#ifdef CONFIG_IP_ROUTE_CLASSID
Alexey Dobriyana661c412009-11-25 15:40:35 -0800556static int rt_acct_proc_show(struct seq_file *m, void *v)
Pavel Emelyanov78c686e2007-12-05 21:13:48 -0800557{
Alexey Dobriyana661c412009-11-25 15:40:35 -0800558 struct ip_rt_acct *dst, *src;
559 unsigned int i, j;
Pavel Emelyanov78c686e2007-12-05 21:13:48 -0800560
Alexey Dobriyana661c412009-11-25 15:40:35 -0800561 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
562 if (!dst)
563 return -ENOMEM;
Pavel Emelyanov78c686e2007-12-05 21:13:48 -0800564
Alexey Dobriyana661c412009-11-25 15:40:35 -0800565 for_each_possible_cpu(i) {
566 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
567 for (j = 0; j < 256; j++) {
568 dst[j].o_bytes += src[j].o_bytes;
569 dst[j].o_packets += src[j].o_packets;
570 dst[j].i_bytes += src[j].i_bytes;
571 dst[j].i_packets += src[j].i_packets;
Pavel Emelyanov78c686e2007-12-05 21:13:48 -0800572 }
573 }
Alexey Dobriyana661c412009-11-25 15:40:35 -0800574
575 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
576 kfree(dst);
577 return 0;
Pavel Emelyanov78c686e2007-12-05 21:13:48 -0800578}
Alexey Dobriyana661c412009-11-25 15:40:35 -0800579
580static int rt_acct_proc_open(struct inode *inode, struct file *file)
581{
582 return single_open(file, rt_acct_proc_show, NULL);
583}
584
585static const struct file_operations rt_acct_proc_fops = {
586 .owner = THIS_MODULE,
587 .open = rt_acct_proc_open,
588 .read = seq_read,
589 .llseek = seq_lseek,
590 .release = single_release,
591};
Pavel Emelyanov78c686e2007-12-05 21:13:48 -0800592#endif
Pavel Emelyanov107f1632007-12-05 21:14:28 -0800593
Denis V. Lunev73b38712008-02-28 20:51:18 -0800594static int __net_init ip_rt_do_proc_init(struct net *net)
Pavel Emelyanov107f1632007-12-05 21:14:28 -0800595{
596 struct proc_dir_entry *pde;
597
598 pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
599 &rt_cache_seq_fops);
600 if (!pde)
601 goto err1;
602
Wang Chen77020722008-02-28 14:14:25 -0800603 pde = proc_create("rt_cache", S_IRUGO,
604 net->proc_net_stat, &rt_cpu_seq_fops);
Pavel Emelyanov107f1632007-12-05 21:14:28 -0800605 if (!pde)
606 goto err2;
607
Patrick McHardyc7066f72011-01-14 13:36:42 +0100608#ifdef CONFIG_IP_ROUTE_CLASSID
Alexey Dobriyana661c412009-11-25 15:40:35 -0800609 pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
Pavel Emelyanov107f1632007-12-05 21:14:28 -0800610 if (!pde)
611 goto err3;
612#endif
613 return 0;
614
Patrick McHardyc7066f72011-01-14 13:36:42 +0100615#ifdef CONFIG_IP_ROUTE_CLASSID
Pavel Emelyanov107f1632007-12-05 21:14:28 -0800616err3:
617 remove_proc_entry("rt_cache", net->proc_net_stat);
618#endif
619err2:
620 remove_proc_entry("rt_cache", net->proc_net);
621err1:
622 return -ENOMEM;
623}
Denis V. Lunev73b38712008-02-28 20:51:18 -0800624
625static void __net_exit ip_rt_do_proc_exit(struct net *net)
626{
627 remove_proc_entry("rt_cache", net->proc_net_stat);
628 remove_proc_entry("rt_cache", net->proc_net);
Patrick McHardyc7066f72011-01-14 13:36:42 +0100629#ifdef CONFIG_IP_ROUTE_CLASSID
Denis V. Lunev73b38712008-02-28 20:51:18 -0800630 remove_proc_entry("rt_acct", net->proc_net);
Alexey Dobriyan0a931ac2010-01-17 03:32:50 +0000631#endif
Denis V. Lunev73b38712008-02-28 20:51:18 -0800632}
633
634static struct pernet_operations ip_rt_proc_ops __net_initdata = {
635 .init = ip_rt_do_proc_init,
636 .exit = ip_rt_do_proc_exit,
637};
638
639static int __init ip_rt_proc_init(void)
640{
641 return register_pernet_subsys(&ip_rt_proc_ops);
642}
643
Pavel Emelyanov107f1632007-12-05 21:14:28 -0800644#else
/* !CONFIG_PROC_FS stub: nothing to register. */
static inline int ip_rt_proc_init(void)
{
	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649#endif /* CONFIG_PROC_FS */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900650
Stephen Hemminger5969f712008-04-10 01:52:09 -0700651static inline void rt_free(struct rtable *rt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700652{
Changli Gaod8d1f302010-06-10 23:31:35 -0700653 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654}
655
Stephen Hemminger5969f712008-04-10 01:52:09 -0700656static inline void rt_drop(struct rtable *rt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658 ip_rt_put(rt);
Changli Gaod8d1f302010-06-10 23:31:35 -0700659 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660}
661
Stephen Hemminger5969f712008-04-10 01:52:09 -0700662static inline int rt_fast_clean(struct rtable *rth)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663{
664 /* Kill broadcast/multicast entries very aggresively, if they
665 collide in hash table with more useful entries */
666 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
David S. Millerc7537962010-11-11 17:07:48 -0800667 rt_is_input_route(rth) && rth->dst.rt_next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700668}
669
Stephen Hemminger5969f712008-04-10 01:52:09 -0700670static inline int rt_valuable(struct rtable *rth)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671{
672 return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
David S. Miller2c8cec52011-02-09 20:42:07 -0800673 (rth->peer && rth->peer->pmtu_expires);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700674}
675
676static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
677{
678 unsigned long age;
679 int ret = 0;
680
Changli Gaod8d1f302010-06-10 23:31:35 -0700681 if (atomic_read(&rth->dst.__refcnt))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682 goto out;
683
Changli Gaod8d1f302010-06-10 23:31:35 -0700684 age = jiffies - rth->dst.lastuse;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685 if ((age <= tmo1 && !rt_fast_clean(rth)) ||
686 (age <= tmo2 && rt_valuable(rth)))
687 goto out;
688 ret = 1;
689out: return ret;
690}
691
692/* Bits of score are:
693 * 31: very valuable
694 * 30: not quite useless
695 * 29..0: usage counter
696 */
697static inline u32 rt_score(struct rtable *rt)
698{
Changli Gaod8d1f302010-06-10 23:31:35 -0700699 u32 score = jiffies - rt->dst.lastuse;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700700
701 score = ~score & ~(3<<30);
702
703 if (rt_valuable(rt))
704 score |= (1<<31);
705
David S. Millerc7537962010-11-11 17:07:48 -0800706 if (rt_is_output_route(rt) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -0700707 !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
708 score |= (1<<30);
709
710 return score;
711}
712
Neil Horman1080d702008-10-27 12:28:25 -0700713static inline bool rt_caching(const struct net *net)
714{
715 return net->ipv4.current_rt_cache_rebuild_count <=
716 net->ipv4.sysctl_rt_cache_rebuild_count;
717}
718
David S. Miller5e2b61f2011-03-04 21:47:09 -0800719static inline bool compare_hash_inputs(const struct rtable *rt1,
720 const struct rtable *rt2)
Neil Horman1080d702008-10-27 12:28:25 -0700721{
David S. Miller5e2b61f2011-03-04 21:47:09 -0800722 return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
723 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
Julian Anastasov97a80412011-08-09 04:01:16 +0000724 (rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
Neil Horman1080d702008-10-27 12:28:25 -0700725}
726
David S. Miller5e2b61f2011-03-04 21:47:09 -0800727static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728{
David S. Miller5e2b61f2011-03-04 21:47:09 -0800729 return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
730 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
731 (rt1->rt_mark ^ rt2->rt_mark) |
David S. Miller475949d2011-05-03 19:45:15 -0700732 (rt1->rt_key_tos ^ rt2->rt_key_tos) |
Julian Anastasovd547f722011-08-07 22:20:20 -0700733 (rt1->rt_route_iif ^ rt2->rt_route_iif) |
Julian Anastasov97a80412011-08-09 04:01:16 +0000734 (rt1->rt_oif ^ rt2->rt_oif)) == 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735}
736
Denis V. Lunevb5921912008-01-22 23:50:25 -0800737static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
738{
Changli Gaod8d1f302010-06-10 23:31:35 -0700739 return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
Denis V. Lunevb5921912008-01-22 23:50:25 -0800740}
741
Denis V. Luneve84f84f2008-07-05 19:04:32 -0700742static inline int rt_is_expired(struct rtable *rth)
743{
Changli Gaod8d1f302010-06-10 23:31:35 -0700744 return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
Denis V. Luneve84f84f2008-07-05 19:04:32 -0700745}
746
Eric Dumazetbeb659b2007-11-19 22:43:37 -0800747/*
748 * Perform a full scan of hash table and free all entries.
749 * Can be called by a softirq or a process.
750 * In the later case, we want to be reschedule if necessary
751 */
David S. Miller6561a3b2010-12-19 21:11:20 -0800752static void rt_do_flush(struct net *net, int process_context)
Eric Dumazetbeb659b2007-11-19 22:43:37 -0800753{
754 unsigned int i;
755 struct rtable *rth, *next;
756
757 for (i = 0; i <= rt_hash_mask; i++) {
David S. Miller6561a3b2010-12-19 21:11:20 -0800758 struct rtable __rcu **pprev;
759 struct rtable *list;
760
Eric Dumazetbeb659b2007-11-19 22:43:37 -0800761 if (process_context && need_resched())
762 cond_resched();
Eric Dumazet33d480c2011-08-11 19:30:52 +0000763 rth = rcu_access_pointer(rt_hash_table[i].chain);
Eric Dumazetbeb659b2007-11-19 22:43:37 -0800764 if (!rth)
765 continue;
766
767 spin_lock_bh(rt_hash_lock_addr(i));
Denis V. Lunev32cb5b42008-07-05 19:06:12 -0700768
David S. Miller6561a3b2010-12-19 21:11:20 -0800769 list = NULL;
770 pprev = &rt_hash_table[i].chain;
771 rth = rcu_dereference_protected(*pprev,
Eric Dumazet1c317202010-10-25 21:02:07 +0000772 lockdep_is_held(rt_hash_lock_addr(i)));
Denis V. Lunev32cb5b42008-07-05 19:06:12 -0700773
David S. Miller6561a3b2010-12-19 21:11:20 -0800774 while (rth) {
775 next = rcu_dereference_protected(rth->dst.rt_next,
776 lockdep_is_held(rt_hash_lock_addr(i)));
Denis V. Lunev32cb5b42008-07-05 19:06:12 -0700777
David S. Miller6561a3b2010-12-19 21:11:20 -0800778 if (!net ||
779 net_eq(dev_net(rth->dst.dev), net)) {
780 rcu_assign_pointer(*pprev, next);
781 rcu_assign_pointer(rth->dst.rt_next, list);
782 list = rth;
Denis V. Lunev32cb5b42008-07-05 19:06:12 -0700783 } else {
David S. Miller6561a3b2010-12-19 21:11:20 -0800784 pprev = &rth->dst.rt_next;
Denis V. Lunev32cb5b42008-07-05 19:06:12 -0700785 }
David S. Miller6561a3b2010-12-19 21:11:20 -0800786 rth = next;
Denis V. Lunev32cb5b42008-07-05 19:06:12 -0700787 }
David S. Miller6561a3b2010-12-19 21:11:20 -0800788
Eric Dumazetbeb659b2007-11-19 22:43:37 -0800789 spin_unlock_bh(rt_hash_lock_addr(i));
790
David S. Miller6561a3b2010-12-19 21:11:20 -0800791 for (; list; list = next) {
792 next = rcu_dereference_protected(list->dst.rt_next, 1);
793 rt_free(list);
Eric Dumazetbeb659b2007-11-19 22:43:37 -0800794 }
795 }
796}
797
Neil Horman1080d702008-10-27 12:28:25 -0700798/*
799 * While freeing expired entries, we compute average chain length
800 * and standard deviation, using fixed-point arithmetic.
801 * This is to have an estimation of rt_chain_length_max:
802 * rt_chain_length_max = max(elasticity, AVG + 4*SD)
803 * We use 3 bits for the fractional part, and 29 (or 61) for magnitude.
804 */
805
/* Fixed-point format used for chain-length statistics:
 * 3 fractional bits; ONE represents the value 1.0.
 */
806#define FRACT_BITS 3
807#define ONE (1UL << FRACT_BITS)
808
Eric Dumazet98376382010-03-08 03:20:00 +0000809/*
810 * Given a hash chain and an item in this hash chain,
811 * find if a previous entry has the same hash_inputs
812 * (but differs on tos, mark or oif)
813 * Returns 0 if an alias is found.
814 * Returns ONE if rth has no alias before itself.
815 */
816static int has_noalias(const struct rtable *head, const struct rtable *rth)
817{
818 const struct rtable *aux = head;
819
820 while (aux != rth) {
David S. Miller5e2b61f2011-03-04 21:47:09 -0800821 if (compare_hash_inputs(aux, rth))
Eric Dumazet98376382010-03-08 03:20:00 +0000822 return 0;
Eric Dumazet1c317202010-10-25 21:02:07 +0000823 aux = rcu_dereference_protected(aux->dst.rt_next, 1);
Eric Dumazet98376382010-03-08 03:20:00 +0000824 }
825 return ONE;
826}
827
Eric Dumazet29e75252008-01-31 17:05:09 -0800828/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300829 * Perturbation of rt_genid by a small quantity [1..256]
Eric Dumazet29e75252008-01-31 17:05:09 -0800830 * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
831 * many times (2^24) without giving recent rt_genid.
832 * Jenkins hash is strong enough that litle changes of rt_genid are OK.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833 */
Denis V. Lunev86c657f2008-07-05 19:03:31 -0700834static void rt_cache_invalidate(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700835{
Eric Dumazet29e75252008-01-31 17:05:09 -0800836 unsigned char shuffle;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700837
Eric Dumazet29e75252008-01-31 17:05:09 -0800838 get_random_bytes(&shuffle, sizeof(shuffle));
Denis V. Luneve84f84f2008-07-05 19:04:32 -0700839 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700840}
841
/*
 * delay < 0  : invalidate cache (fast: entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(net, !in_softirq());
}
852
/* Flush entries already invalidated by a previous generation bump. */
void rt_cache_flush_batch(struct net *net)
{
	rt_do_flush(net, !in_softirq());
}
858
Neil Horman1080d702008-10-27 12:28:25 -0700859static void rt_emergency_hash_rebuild(struct net *net)
860{
Neil Horman3ee94372010-05-08 01:57:52 -0700861 if (net_ratelimit())
Neil Horman1080d702008-10-27 12:28:25 -0700862 printk(KERN_WARNING "Route hash chain too long!\n");
Neil Horman3ee94372010-05-08 01:57:52 -0700863 rt_cache_invalidate(net);
Neil Horman1080d702008-10-27 12:28:25 -0700864}
865
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866/*
867 Short description of GC goals.
868
869 We want to build algorithm, which will keep routing cache
870 at some equilibrium point, when number of aged off entries
871 is kept approximately equal to newly generated ones.
872
873 Current expiration strength is variable "expire".
874 We try to adjust it dynamically, so that if networking
875 is idle expires is large enough to keep enough of warm entries,
876 and when load increases it reduces to limit cache size.
877 */
878
Daniel Lezcano569d3642008-01-18 03:56:57 -0800879static int rt_garbage_collect(struct dst_ops *ops)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880{
881 static unsigned long expire = RT_GC_TIMEOUT;
882 static unsigned long last_gc;
883 static int rover;
884 static int equilibrium;
Eric Dumazet1c317202010-10-25 21:02:07 +0000885 struct rtable *rth;
886 struct rtable __rcu **rthp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700887 unsigned long now = jiffies;
888 int goal;
Eric Dumazetfc66f952010-10-08 06:37:34 +0000889 int entries = dst_entries_get_fast(&ipv4_dst_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700890
891 /*
892 * Garbage collection is pretty expensive,
893 * do not make it too frequently.
894 */
895
896 RT_CACHE_STAT_INC(gc_total);
897
898 if (now - last_gc < ip_rt_gc_min_interval &&
Eric Dumazetfc66f952010-10-08 06:37:34 +0000899 entries < ip_rt_max_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 RT_CACHE_STAT_INC(gc_ignored);
901 goto out;
902 }
903
Eric Dumazetfc66f952010-10-08 06:37:34 +0000904 entries = dst_entries_get_slow(&ipv4_dst_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700905 /* Calculate number of entries, which we want to expire now. */
Eric Dumazetfc66f952010-10-08 06:37:34 +0000906 goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907 if (goal <= 0) {
908 if (equilibrium < ipv4_dst_ops.gc_thresh)
909 equilibrium = ipv4_dst_ops.gc_thresh;
Eric Dumazetfc66f952010-10-08 06:37:34 +0000910 goal = entries - equilibrium;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911 if (goal > 0) {
Eric Dumazetb790ced2007-12-21 01:49:07 -0800912 equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
Eric Dumazetfc66f952010-10-08 06:37:34 +0000913 goal = entries - equilibrium;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914 }
915 } else {
916 /* We are in dangerous area. Try to reduce cache really
917 * aggressively.
918 */
Eric Dumazetb790ced2007-12-21 01:49:07 -0800919 goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
Eric Dumazetfc66f952010-10-08 06:37:34 +0000920 equilibrium = entries - goal;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700921 }
922
923 if (now - last_gc >= ip_rt_gc_min_interval)
924 last_gc = now;
925
926 if (goal <= 0) {
927 equilibrium += goal;
928 goto work_done;
929 }
930
931 do {
932 int i, k;
933
934 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
935 unsigned long tmo = expire;
936
937 k = (k + 1) & rt_hash_mask;
938 rthp = &rt_hash_table[k].chain;
Eric Dumazet22c047c2005-07-05 14:55:24 -0700939 spin_lock_bh(rt_hash_lock_addr(k));
Eric Dumazet1c317202010-10-25 21:02:07 +0000940 while ((rth = rcu_dereference_protected(*rthp,
941 lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
Denis V. Luneve84f84f2008-07-05 19:04:32 -0700942 if (!rt_is_expired(rth) &&
Eric Dumazet29e75252008-01-31 17:05:09 -0800943 !rt_may_expire(rth, tmo, expire)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944 tmo >>= 1;
Changli Gaod8d1f302010-06-10 23:31:35 -0700945 rthp = &rth->dst.rt_next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946 continue;
947 }
Changli Gaod8d1f302010-06-10 23:31:35 -0700948 *rthp = rth->dst.rt_next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700949 rt_free(rth);
950 goal--;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700951 }
Eric Dumazet22c047c2005-07-05 14:55:24 -0700952 spin_unlock_bh(rt_hash_lock_addr(k));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700953 if (goal <= 0)
954 break;
955 }
956 rover = k;
957
958 if (goal <= 0)
959 goto work_done;
960
961 /* Goal is not achieved. We stop process if:
962
963 - if expire reduced to zero. Otherwise, expire is halfed.
964 - if table is not full.
965 - if we are called from interrupt.
966 - jiffies check is just fallback/debug loop breaker.
967 We will not spin here for long time in any case.
968 */
969
970 RT_CACHE_STAT_INC(gc_goal_miss);
971
972 if (expire == 0)
973 break;
974
975 expire >>= 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976
Eric Dumazetfc66f952010-10-08 06:37:34 +0000977 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978 goto out;
979 } while (!in_softirq() && time_before_eq(jiffies, now));
980
Eric Dumazetfc66f952010-10-08 06:37:34 +0000981 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
982 goto out;
983 if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 goto out;
985 if (net_ratelimit())
986 printk(KERN_WARNING "dst cache overflow\n");
987 RT_CACHE_STAT_INC(gc_dst_overflow);
988 return 1;
989
990work_done:
991 expire += ip_rt_gc_min_interval;
992 if (expire > ip_rt_gc_timeout ||
Eric Dumazetfc66f952010-10-08 06:37:34 +0000993 dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
994 dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700995 expire = ip_rt_gc_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996out: return 0;
997}
998
Eric Dumazet98376382010-03-08 03:20:00 +0000999/*
1000 * Returns number of entries in a hash chain that have different hash_inputs
1001 */
1002static int slow_chain_length(const struct rtable *head)
1003{
1004 int length = 0;
1005 const struct rtable *rth = head;
1006
1007 while (rth) {
1008 length += has_noalias(head, rth);
Eric Dumazet1c317202010-10-25 21:02:07 +00001009 rth = rcu_dereference_protected(rth->dst.rt_next, 1);
Eric Dumazet98376382010-03-08 03:20:00 +00001010 }
1011 return length >> FRACT_BITS;
1012}
1013
David S. Millerd3aaeb32011-07-18 00:40:17 -07001014static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
David Miller3769cff2011-07-11 22:44:24 +00001015{
David Miller3769cff2011-07-11 22:44:24 +00001016 struct neigh_table *tbl = &arp_tbl;
David S. Millerd3aaeb32011-07-18 00:40:17 -07001017 static const __be32 inaddr_any = 0;
1018 struct net_device *dev = dst->dev;
1019 const __be32 *pkey = daddr;
David Miller3769cff2011-07-11 22:44:24 +00001020 struct neighbour *n;
1021
1022#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
1023 if (dev->type == ARPHRD_ATM)
1024 tbl = clip_tbl_hook;
1025#endif
David Miller3769cff2011-07-11 22:44:24 +00001026 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
David S. Millerd3aaeb32011-07-18 00:40:17 -07001027 pkey = &inaddr_any;
1028
1029 n = __ipv4_neigh_lookup(tbl, dev, *(__force u32 *)pkey);
1030 if (n)
1031 return n;
1032 return neigh_create(tbl, pkey, dev);
1033}
1034
1035static int rt_bind_neighbour(struct rtable *rt)
1036{
1037 struct neighbour *n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
David Miller3769cff2011-07-11 22:44:24 +00001038 if (IS_ERR(n))
1039 return PTR_ERR(n);
David S. Miller69cce1d2011-07-17 23:09:49 -07001040 dst_set_neighbour(&rt->dst, n);
David Miller3769cff2011-07-11 22:44:24 +00001041
1042 return 0;
1043}
1044
David S. Millerb23dd4f2011-03-02 14:31:35 -08001045static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
1046 struct sk_buff *skb, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001047{
Eric Dumazet1c317202010-10-25 21:02:07 +00001048 struct rtable *rth, *cand;
1049 struct rtable __rcu **rthp, **candp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050 unsigned long now;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051 u32 min_score;
1052 int chain_length;
1053 int attempts = !in_softirq();
1054
1055restart:
1056 chain_length = 0;
1057 min_score = ~(u32)0;
1058 cand = NULL;
1059 candp = NULL;
1060 now = jiffies;
1061
Changli Gaod8d1f302010-06-10 23:31:35 -07001062 if (!rt_caching(dev_net(rt->dst.dev))) {
Neil Horman73e42892009-06-20 01:15:16 -07001063 /*
1064 * If we're not caching, just tell the caller we
1065 * were successful and don't touch the route. The
1066 * caller hold the sole reference to the cache entry, and
1067 * it will be released when the caller is done with it.
1068 * If we drop it here, the callers have no way to resolve routes
1069 * when we're not caching. Instead, just point *rp at rt, so
1070 * the caller gets a single use out of the route
Neil Hormanb6280b42009-06-22 10:18:53 +00001071 * Note that we do rt_free on this new route entry, so that
1072 * once its refcount hits zero, we are still able to reap it
1073 * (Thanks Alexey)
Eric Dumazet27b75c92010-10-15 05:44:11 +00001074 * Note: To avoid expensive rcu stuff for this uncached dst,
1075 * we set DST_NOCACHE so that dst_release() can free dst without
1076 * waiting a grace period.
Neil Horman73e42892009-06-20 01:15:16 -07001077 */
Neil Hormanb6280b42009-06-22 10:18:53 +00001078
Eric Dumazetc7d44262010-10-03 22:17:54 -07001079 rt->dst.flags |= DST_NOCACHE;
David S. Millerc7537962010-11-11 17:07:48 -08001080 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
David Miller3769cff2011-07-11 22:44:24 +00001081 int err = rt_bind_neighbour(rt);
Neil Hormanb6280b42009-06-22 10:18:53 +00001082 if (err) {
1083 if (net_ratelimit())
1084 printk(KERN_WARNING
1085 "Neighbour table failure & not caching routes.\n");
Eric Dumazet27b75c92010-10-15 05:44:11 +00001086 ip_rt_put(rt);
David S. Millerb23dd4f2011-03-02 14:31:35 -08001087 return ERR_PTR(err);
Neil Hormanb6280b42009-06-22 10:18:53 +00001088 }
1089 }
1090
Neil Hormanb6280b42009-06-22 10:18:53 +00001091 goto skip_hashing;
Neil Horman1080d702008-10-27 12:28:25 -07001092 }
1093
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094 rthp = &rt_hash_table[hash].chain;
1095
Eric Dumazet22c047c2005-07-05 14:55:24 -07001096 spin_lock_bh(rt_hash_lock_addr(hash));
Eric Dumazet1c317202010-10-25 21:02:07 +00001097 while ((rth = rcu_dereference_protected(*rthp,
1098 lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
Denis V. Luneve84f84f2008-07-05 19:04:32 -07001099 if (rt_is_expired(rth)) {
Changli Gaod8d1f302010-06-10 23:31:35 -07001100 *rthp = rth->dst.rt_next;
Eric Dumazet29e75252008-01-31 17:05:09 -08001101 rt_free(rth);
1102 continue;
1103 }
David S. Miller5e2b61f2011-03-04 21:47:09 -08001104 if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105 /* Put it first */
Changli Gaod8d1f302010-06-10 23:31:35 -07001106 *rthp = rth->dst.rt_next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001107 /*
1108 * Since lookup is lockfree, the deletion
1109 * must be visible to another weakly ordered CPU before
1110 * the insertion at the start of the hash chain.
1111 */
Changli Gaod8d1f302010-06-10 23:31:35 -07001112 rcu_assign_pointer(rth->dst.rt_next,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001113 rt_hash_table[hash].chain);
1114 /*
1115 * Since lookup is lockfree, the update writes
1116 * must be ordered for consistency on SMP.
1117 */
1118 rcu_assign_pointer(rt_hash_table[hash].chain, rth);
1119
Changli Gaod8d1f302010-06-10 23:31:35 -07001120 dst_use(&rth->dst, now);
Eric Dumazet22c047c2005-07-05 14:55:24 -07001121 spin_unlock_bh(rt_hash_lock_addr(hash));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122
1123 rt_drop(rt);
David S. Millerb23dd4f2011-03-02 14:31:35 -08001124 if (skb)
Changli Gaod8d1f302010-06-10 23:31:35 -07001125 skb_dst_set(skb, &rth->dst);
David S. Millerb23dd4f2011-03-02 14:31:35 -08001126 return rth;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127 }
1128
Changli Gaod8d1f302010-06-10 23:31:35 -07001129 if (!atomic_read(&rth->dst.__refcnt)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 u32 score = rt_score(rth);
1131
1132 if (score <= min_score) {
1133 cand = rth;
1134 candp = rthp;
1135 min_score = score;
1136 }
1137 }
1138
1139 chain_length++;
1140
Changli Gaod8d1f302010-06-10 23:31:35 -07001141 rthp = &rth->dst.rt_next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142 }
1143
1144 if (cand) {
1145 /* ip_rt_gc_elasticity used to be average length of chain
1146 * length, when exceeded gc becomes really aggressive.
1147 *
1148 * The second limit is less certain. At the moment it allows
1149 * only 2 entries per bucket. We will see.
1150 */
1151 if (chain_length > ip_rt_gc_elasticity) {
Changli Gaod8d1f302010-06-10 23:31:35 -07001152 *candp = cand->dst.rt_next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 rt_free(cand);
1154 }
Neil Horman1080d702008-10-27 12:28:25 -07001155 } else {
Eric Dumazet98376382010-03-08 03:20:00 +00001156 if (chain_length > rt_chain_length_max &&
1157 slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
Changli Gaod8d1f302010-06-10 23:31:35 -07001158 struct net *net = dev_net(rt->dst.dev);
Neil Horman1080d702008-10-27 12:28:25 -07001159 int num = ++net->ipv4.current_rt_cache_rebuild_count;
Pavel Emelyanovb35ecb52010-03-24 07:43:17 +00001160 if (!rt_caching(net)) {
Neil Horman1080d702008-10-27 12:28:25 -07001161 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
Changli Gaod8d1f302010-06-10 23:31:35 -07001162 rt->dst.dev->name, num);
Neil Horman1080d702008-10-27 12:28:25 -07001163 }
Pavel Emelyanovb35ecb52010-03-24 07:43:17 +00001164 rt_emergency_hash_rebuild(net);
Pavel Emelyanov6a2bad72010-03-24 21:51:22 +00001165 spin_unlock_bh(rt_hash_lock_addr(hash));
1166
David S. Miller5e2b61f2011-03-04 21:47:09 -08001167 hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
Pavel Emelyanov6a2bad72010-03-24 21:51:22 +00001168 ifindex, rt_genid(net));
1169 goto restart;
Neil Horman1080d702008-10-27 12:28:25 -07001170 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171 }
1172
1173 /* Try to bind route to arp only if it is output
1174 route or unicast forwarding path.
1175 */
David S. Millerc7537962010-11-11 17:07:48 -08001176 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
David Miller3769cff2011-07-11 22:44:24 +00001177 int err = rt_bind_neighbour(rt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 if (err) {
Eric Dumazet22c047c2005-07-05 14:55:24 -07001179 spin_unlock_bh(rt_hash_lock_addr(hash));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180
1181 if (err != -ENOBUFS) {
1182 rt_drop(rt);
David S. Millerb23dd4f2011-03-02 14:31:35 -08001183 return ERR_PTR(err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184 }
1185
1186 /* Neighbour tables are full and nothing
1187 can be released. Try to shrink route cache,
1188 it is most likely it holds some neighbour records.
1189 */
1190 if (attempts-- > 0) {
1191 int saved_elasticity = ip_rt_gc_elasticity;
1192 int saved_int = ip_rt_gc_min_interval;
1193 ip_rt_gc_elasticity = 1;
1194 ip_rt_gc_min_interval = 0;
Daniel Lezcano569d3642008-01-18 03:56:57 -08001195 rt_garbage_collect(&ipv4_dst_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196 ip_rt_gc_min_interval = saved_int;
1197 ip_rt_gc_elasticity = saved_elasticity;
1198 goto restart;
1199 }
1200
1201 if (net_ratelimit())
Ulrich Weber7e1b33e2010-09-27 15:02:18 -07001202 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 rt_drop(rt);
David S. Millerb23dd4f2011-03-02 14:31:35 -08001204 return ERR_PTR(-ENOBUFS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 }
1206 }
1207
Changli Gaod8d1f302010-06-10 23:31:35 -07001208 rt->dst.rt_next = rt_hash_table[hash].chain;
Neil Horman1080d702008-10-27 12:28:25 -07001209
Eric Dumazet00269b52008-10-16 14:18:29 -07001210 /*
1211 * Since lookup is lockfree, we must make sure
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001212 * previous writes to rt are committed to memory
Eric Dumazet00269b52008-10-16 14:18:29 -07001213 * before making rt visible to other CPUS.
1214 */
Eric Dumazet1ddbcb02009-05-19 20:14:28 +00001215 rcu_assign_pointer(rt_hash_table[hash].chain, rt);
Neil Horman1080d702008-10-27 12:28:25 -07001216
Eric Dumazet22c047c2005-07-05 14:55:24 -07001217 spin_unlock_bh(rt_hash_lock_addr(hash));
Neil Horman73e42892009-06-20 01:15:16 -07001218
Neil Hormanb6280b42009-06-22 10:18:53 +00001219skip_hashing:
David S. Millerb23dd4f2011-03-02 14:31:35 -08001220 if (skb)
Changli Gaod8d1f302010-06-10 23:31:35 -07001221 skb_dst_set(skb, &rt->dst);
David S. Millerb23dd4f2011-03-02 14:31:35 -08001222 return rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223}
1224
David S. Miller6431cbc2011-02-07 20:38:06 -08001225static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
1226
1227static u32 rt_peer_genid(void)
1228{
1229 return atomic_read(&__rt_peer_genid);
1230}
1231
David S. Millera48eff12011-05-18 18:42:43 -04001232void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234 struct inet_peer *peer;
1235
David S. Millera48eff12011-05-18 18:42:43 -04001236 peer = inet_getpeer_v4(daddr, create);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237
Eric Dumazet49e8ab02010-08-19 06:10:45 +00001238 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 inet_putpeer(peer);
David S. Miller6431cbc2011-02-07 20:38:06 -08001240 else
1241 rt->rt_peer_genid = rt_peer_genid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242}
1243
1244/*
1245 * Peer allocation may fail only in serious out-of-memory conditions. However
1246 * we still can generate some output.
1247 * Random ID selection looks a bit dangerous because we have no chances to
1248 * select ID being unique in a reasonable period of time.
1249 * But broken packet identifier may be better than no packet at all.
1250 */
1251static void ip_select_fb_ident(struct iphdr *iph)
1252{
1253 static DEFINE_SPINLOCK(ip_fb_id_lock);
1254 static u32 ip_fallback_id;
1255 u32 salt;
1256
1257 spin_lock_bh(&ip_fb_id_lock);
Al Viroe4485152006-09-26 22:15:01 -07001258 salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 iph->id = htons(salt & 0xFFFF);
1260 ip_fallback_id = salt;
1261 spin_unlock_bh(&ip_fb_id_lock);
1262}
1263
1264void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1265{
1266 struct rtable *rt = (struct rtable *) dst;
1267
1268 if (rt) {
1269 if (rt->peer == NULL)
David S. Millera48eff12011-05-18 18:42:43 -04001270 rt_bind_peer(rt, rt->rt_dst, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271
1272 /* If peer is attached to destination, it is never detached,
1273 so that we need not to grab a lock to dereference it.
1274 */
1275 if (rt->peer) {
1276 iph->id = htons(inet_getid(rt->peer, more));
1277 return;
1278 }
1279 } else
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001280 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
Stephen Hemminger9c2b3322005-04-19 22:39:42 -07001281 __builtin_return_address(0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282
1283 ip_select_fb_ident(iph);
1284}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001285EXPORT_SYMBOL(__ip_select_ident);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286
1287static void rt_del(unsigned hash, struct rtable *rt)
1288{
Eric Dumazet1c317202010-10-25 21:02:07 +00001289 struct rtable __rcu **rthp;
1290 struct rtable *aux;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291
Eric Dumazet29e75252008-01-31 17:05:09 -08001292 rthp = &rt_hash_table[hash].chain;
Eric Dumazet22c047c2005-07-05 14:55:24 -07001293 spin_lock_bh(rt_hash_lock_addr(hash));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 ip_rt_put(rt);
Eric Dumazet1c317202010-10-25 21:02:07 +00001295 while ((aux = rcu_dereference_protected(*rthp,
1296 lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
Denis V. Luneve84f84f2008-07-05 19:04:32 -07001297 if (aux == rt || rt_is_expired(aux)) {
Changli Gaod8d1f302010-06-10 23:31:35 -07001298 *rthp = aux->dst.rt_next;
Eric Dumazet29e75252008-01-31 17:05:09 -08001299 rt_free(aux);
1300 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 }
Changli Gaod8d1f302010-06-10 23:31:35 -07001302 rthp = &aux->dst.rt_next;
Eric Dumazet29e75252008-01-31 17:05:09 -08001303 }
Eric Dumazet22c047c2005-07-05 14:55:24 -07001304 spin_unlock_bh(rt_hash_lock_addr(hash));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305}
1306
Eric Dumazeted7865a42010-06-07 21:49:44 -07001307/* called in rcu_read_lock() section */
Al Virof7655222006-09-26 21:25:43 -07001308void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1309 __be32 saddr, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310{
Flavio Leitner7cc91502011-10-24 02:56:38 -04001311 int s, i;
Eric Dumazeted7865a42010-06-07 21:49:44 -07001312 struct in_device *in_dev = __in_dev_get_rcu(dev);
Flavio Leitner7cc91502011-10-24 02:56:38 -04001313 struct rtable *rt;
1314 __be32 skeys[2] = { saddr, 0 };
1315 int ikeys[2] = { dev->ifindex, 0 };
1316 struct flowi4 fl4;
David S. Millerf39925d2011-02-09 22:00:16 -08001317 struct inet_peer *peer;
Denis V. Lunev317805b2008-02-28 20:50:06 -08001318 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 if (!in_dev)
1321 return;
1322
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001323 net = dev_net(dev);
Joe Perches9d4fb272009-11-23 10:41:23 -08001324 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
1325 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
1326 ipv4_is_zeronet(new_gw))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 goto reject_redirect;
1328
1329 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1330 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1331 goto reject_redirect;
1332 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1333 goto reject_redirect;
1334 } else {
Denis V. Lunev317805b2008-02-28 20:50:06 -08001335 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 goto reject_redirect;
1337 }
1338
Flavio Leitner7cc91502011-10-24 02:56:38 -04001339 memset(&fl4, 0, sizeof(fl4));
1340 fl4.daddr = daddr;
1341 for (s = 0; s < 2; s++) {
1342 for (i = 0; i < 2; i++) {
1343 fl4.flowi4_oif = ikeys[i];
1344 fl4.saddr = skeys[s];
1345 rt = __ip_route_output_key(net, &fl4);
1346 if (IS_ERR(rt))
1347 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348
Flavio Leitner7cc91502011-10-24 02:56:38 -04001349 if (rt->dst.error || rt->dst.dev != dev ||
1350 rt->rt_gateway != old_gw) {
1351 ip_rt_put(rt);
1352 continue;
1353 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354
Flavio Leitner7cc91502011-10-24 02:56:38 -04001355 if (!rt->peer)
1356 rt_bind_peer(rt, rt->rt_dst, 1);
1357
1358 peer = rt->peer;
1359 if (peer) {
1360 peer->redirect_learned.a4 = new_gw;
1361 atomic_inc(&__rt_peer_genid);
1362 }
1363
1364 ip_rt_put(rt);
1365 return;
1366 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368 return;
1369
1370reject_redirect:
1371#ifdef CONFIG_IP_ROUTE_VERBOSE
1372 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
Harvey Harrison673d57e2008-10-31 00:53:57 -07001373 printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
1374 " Advised path = %pI4 -> %pI4\n",
1375 &old_gw, dev->name, &new_gw,
1376 &saddr, &daddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377#endif
Eric Dumazeted7865a42010-06-07 21:49:44 -07001378 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379}
1380
Eric Dumazetfe6fe792011-06-08 06:07:07 +00001381static bool peer_pmtu_expired(struct inet_peer *peer)
1382{
1383 unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1384
1385 return orig &&
1386 time_after_eq(jiffies, orig) &&
1387 cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1388}
1389
1390static bool peer_pmtu_cleaned(struct inet_peer *peer)
1391{
1392 unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1393
1394 return orig &&
1395 cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1396}
1397
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1399{
Eric Dumazetee6b9672008-03-05 18:30:47 -08001400 struct rtable *rt = (struct rtable *)dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 struct dst_entry *ret = dst;
1402
1403 if (rt) {
Timo Teräsd11a4dc2010-03-18 23:20:20 +00001404 if (dst->obsolete > 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 ip_rt_put(rt);
1406 ret = NULL;
David S. Miller2c8cec52011-02-09 20:42:07 -08001407 } else if (rt->rt_flags & RTCF_REDIRECTED) {
David S. Miller5e2b61f2011-03-04 21:47:09 -08001408 unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1409 rt->rt_oif,
Denis V. Luneve84f84f2008-07-05 19:04:32 -07001410 rt_genid(dev_net(dst->dev)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 rt_del(hash, rt);
1412 ret = NULL;
Eric Dumazetfe6fe792011-06-08 06:07:07 +00001413 } else if (rt->peer && peer_pmtu_expired(rt->peer)) {
1414 dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 }
1416 }
1417 return ret;
1418}
1419
1420/*
1421 * Algorithm:
1422 * 1. The first ip_rt_redirect_number redirects are sent
1423 * with exponential backoff, then we stop sending them at all,
1424 * assuming that the host ignores our redirects.
1425 * 2. If we did not see packets requiring redirects
1426 * during ip_rt_redirect_silence, we assume that the host
1427 * forgot redirected route and start to send redirects again.
1428 *
1429 * This algorithm is much cheaper and more intelligent than dumb load limiting
1430 * in icmp.c.
1431 *
1432 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1433 * and "frag. need" (breaks PMTU discovery) in icmp.c.
1434 */
1435
/*
 * ip_rt_send_redirect - possibly emit an ICMP host redirect for @skb.
 *
 * Gated three ways (see the algorithm comment above):
 *  - the output device must have send_redirects enabled;
 *  - per-destination backoff: the delay doubles with every redirect
 *    sent (ip_rt_redirect_load << rate_tokens);
 *  - after ip_rt_redirect_number apparently-ignored redirects we stop
 *    entirely until ip_rt_redirect_silence has elapsed.
 * The per-destination state lives in the route's inet_peer; without a
 * peer the redirect is sent unthrottled.
 */
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	int log_martians;

	/* Only the in_dev lookup needs RCU protection; snapshot the
	 * log_martians flag before dropping the lock. */
	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	rcu_read_unlock();

	if (!rt->peer)
		rt_bind_peer(rt, rt->rt_dst, 1);
	peer = rt->peer;
	if (!peer) {
		/* No peer state available: send without rate limiting. */
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		return;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		/* Log exactly once, on the transition into the silent state. */
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number &&
		    net_ratelimit())
			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
			       &ip_hdr(skb)->saddr, rt->rt_iif,
			       &rt->rt_dst, &rt->rt_gateway);
#endif
	}
}
1494
/*
 * ip_error - input handler for routes whose dst.error is set.
 *
 * Maps the route's error to an ICMP destination-unreachable code and
 * sends it back to the source, rate-limited by a token bucket kept in
 * the destination's inet_peer (refilled one token per jiffy, capped at
 * ip_rt_error_burst, each ICMP costing ip_rt_error_cost).  The skb is
 * always consumed.
 */
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct inet_peer *peer;
	unsigned long now;
	bool send;
	int code;

	switch (rt->dst.error) {
	case EINVAL:
	default:
		/* No ICMP reply for these errors; just drop. */
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		IP_INC_STATS_BH(dev_net(rt->dst.dev),
				IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	if (!rt->peer)
		rt_bind_peer(rt, rt->rt_dst, 1);
	peer = rt->peer;

	/* Without peer state the ICMP error is sent unconditionally. */
	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542
/*
 * Plateau table of common link MTUs, used to estimate the path MTU
 * when an ICMP frag-needed message carries no next-hop MTU.
 * The last two values are not from the RFC but
 * are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] = {
	32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128
};

/* Return the largest plateau strictly smaller than @old_mtu, falling
 * back to 68 (the minimum IPv4 MTU) when none qualifies.
 */
static inline unsigned short guess_mtu(unsigned short old_mtu)
{
	unsigned int idx;
	const unsigned int count = sizeof(mtu_plateau) / sizeof(mtu_plateau[0]);

	for (idx = 0; idx < count; idx++) {
		if (old_mtu > mtu_plateau[idx])
			return mtu_plateau[idx];
	}
	return 68;
}
1560
/*
 * ip_rt_frag_needed - process an incoming ICMP fragmentation-needed
 * message for the destination in @iph.
 *
 * @iph:     header of the packet quoted in the ICMP error (tot_len is
 *           the size of the packet that was too big).
 * @new_mtu: next-hop MTU reported by the router (0 or garbage on old
 *           BSD-derived systems, hence the plateau fallback).
 * Returns the MTU recorded for the destination, or @new_mtu unchanged
 * when nothing was learned.
 *
 * The learned value is stored on the destination's inet_peer with an
 * expiry of ip_rt_mtu_expires, and __rt_peer_genid is bumped so cached
 * routes notice on their next validation.
 */
unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
				 unsigned short new_mtu,
				 struct net_device *dev)
{
	unsigned short old_mtu = ntohs(iph->tot_len);
	unsigned short est_mtu = 0;
	struct inet_peer *peer;

	/* create=1: we want a peer entry to remember the PMTU on. */
	peer = inet_getpeer_v4(iph->daddr, 1);
	if (peer) {
		unsigned short mtu = new_mtu;

		if (new_mtu < 68 || new_mtu >= old_mtu) {
			/* BSD 4.2 derived systems incorrectly adjust
			 * tot_len by the IP header length, and report
			 * a zero MTU in the ICMP message.
			 */
			if (mtu == 0 &&
			    old_mtu >= 68 + (iph->ihl << 2))
				old_mtu -= iph->ihl << 2;
			/* No usable next-hop MTU: estimate from the
			 * plateau table instead. */
			mtu = guess_mtu(old_mtu);
		}

		if (mtu < ip_rt_min_pmtu)
			mtu = ip_rt_min_pmtu;
		/* Record only first-time learning or a decrease. */
		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
			unsigned long pmtu_expires;

			pmtu_expires = jiffies + ip_rt_mtu_expires;
			/* 0 means "no expiry pending"; avoid it. */
			if (!pmtu_expires)
				pmtu_expires = 1UL;

			est_mtu = mtu;
			peer->pmtu_learned = mtu;
			peer->pmtu_expires = pmtu_expires;
			atomic_inc(&__rt_peer_genid);
		}

		inet_putpeer(peer);
	}
	return est_mtu ? : new_mtu;
}
1603
/*
 * check_peer_pmtu - synchronize a dst's MTU metric with the peer's
 * learned PMTU state.
 *
 * While the learned PMTU is still valid and smaller than the dst's
 * current MTU, install it (saving the original metric once so it can
 * be restored later).  When it has expired, the cmpxchg() lets exactly
 * one caller win the race to clear the expiry and roll the MTU back.
 */
static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
{
	unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);

	if (!expires)
		return;
	if (time_before(jiffies, expires)) {
		u32 orig_dst_mtu = dst_mtu(dst);
		if (peer->pmtu_learned < orig_dst_mtu) {
			/* Remember the pre-PMTU metric exactly once. */
			if (!peer->pmtu_orig)
				peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
			dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
		}
	} else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
		dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
}
1620
/*
 * dst_ops update_pmtu handler: record a new path MTU for this route's
 * destination on its inet_peer, clamped below by ip_rt_min_pmtu, then
 * propagate it into the dst's MTU metric via check_peer_pmtu().
 * Bumping __rt_peer_genid makes other cached routes to the same peer
 * pick the change up on their next ipv4_dst_check().
 */
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;

	/* The path evidently works (we got feedback); confirm the dst. */
	dst_confirm(dst);

	if (!rt->peer)
		rt_bind_peer(rt, rt->rt_dst, 1);
	peer = rt->peer;
	if (peer) {
		unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);

		if (mtu < ip_rt_min_pmtu)
			mtu = ip_rt_min_pmtu;
		/* Accept only first-time learning or a decrease. */
		if (!pmtu_expires || mtu < peer->pmtu_learned) {

			pmtu_expires = jiffies + ip_rt_mtu_expires;
			/* 0 means "no expiry pending"; avoid it. */
			if (!pmtu_expires)
				pmtu_expires = 1UL;

			peer->pmtu_learned = mtu;
			peer->pmtu_expires = pmtu_expires;

			atomic_inc(&__rt_peer_genid);
			rt->rt_peer_genid = rt_peer_genid();
		}
		check_peer_pmtu(dst, peer);
	}
}
1651
/*
 * check_peer_redir - apply a redirect-learned gateway from @peer to
 * this cached route.
 *
 * Swaps in a neighbour entry for the new gateway; if the neighbour is
 * not (yet) valid, kicks off resolution and restores the old gateway
 * address, returning -EAGAIN so the caller can retry later.  Returns
 * 0 on success, or a PTR_ERR from the neighbour lookup.
 *
 * NOTE(review): the old neighbour is released via xchg() even on the
 * -EAGAIN path — only rt_gateway is rolled back, not _neighbour.
 */
static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
{
	struct rtable *rt = (struct rtable *) dst;
	__be32 orig_gw = rt->rt_gateway;
	struct neighbour *n, *old_n;

	dst_confirm(&rt->dst);

	rt->rt_gateway = peer->redirect_learned.a4;

	n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
	if (IS_ERR(n))
		return PTR_ERR(n);
	/* Atomically install the new neighbour, dropping the old one. */
	old_n = xchg(&rt->dst._neighbour, n);
	if (old_n)
		neigh_release(old_n);
	if (!n || !(n->nud_state & NUD_VALID)) {
		if (n)
			neigh_event_send(n, NULL);
		rt->rt_gateway = orig_gw;
		return -EAGAIN;
	} else {
		rt->rt_flags |= RTCF_REDIRECTED;
		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
	}
	return 0;
}
1679
/*
 * dst_ops check handler: validate a cached route before reuse.
 *
 * Returns NULL when the route belongs to an old cache generation, or
 * when an inter-generation peer update (new PMTU or redirect) could
 * not be applied; otherwise returns the dst itself.  The per-route
 * rt_peer_genid snapshot makes the peer re-check O(1) in the common
 * case where nothing changed.
 */
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt_is_expired(rt))
		return NULL;
	if (rt->rt_peer_genid != rt_peer_genid()) {
		struct inet_peer *peer;

		/* create=0: only look up an existing peer entry here. */
		if (!rt->peer)
			rt_bind_peer(rt, rt->rt_dst, 0);

		peer = rt->peer;
		if (peer) {
			check_peer_pmtu(dst, peer);

			/* A redirect was learned for this destination;
			 * fail the check if we cannot adopt it yet. */
			if (peer->redirect_learned.a4 &&
			    peer->redirect_learned.a4 != rt->rt_gateway) {
				if (check_peer_redir(dst, peer))
					return NULL;
			}
		}

		rt->rt_peer_genid = rt_peer_genid();
	}
	return dst;
}
1707
1708static void ipv4_dst_destroy(struct dst_entry *dst)
1709{
1710 struct rtable *rt = (struct rtable *) dst;
1711 struct inet_peer *peer = rt->peer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712
David S. Miller62fa8a82011-01-26 20:51:05 -08001713 if (rt->fi) {
1714 fib_info_put(rt->fi);
1715 rt->fi = NULL;
1716 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 if (peer) {
1718 rt->peer = NULL;
1719 inet_putpeer(peer);
1720 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721}
1722
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
1724static void ipv4_link_failure(struct sk_buff *skb)
1725{
1726 struct rtable *rt;
1727
1728 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1729
Eric Dumazet511c3f92009-06-02 05:14:27 +00001730 rt = skb_rtable(skb);
Eric Dumazetfe6fe792011-06-08 06:07:07 +00001731 if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
1732 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733}
1734
1735static int ip_rt_bug(struct sk_buff *skb)
1736{
Harvey Harrison673d57e2008-10-31 00:53:57 -07001737 printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
1738 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 skb->dev ? skb->dev->name : "?");
1740 kfree_skb(skb);
Dave Jonesc378a9c2011-05-21 07:16:42 +00001741 WARN_ON(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 return 0;
1743}
1744
/*
   We do not cache the source address of the outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so that it is out of the fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */
1753
/*
 * ip_rt_get_source - copy the 4-byte source address to advertise in
 * IP RR/TS/SRR options into @addr (which may be unaligned, hence the
 * memcpy).
 *
 * For output routes the packet's own source address is used; for input
 * routes a reverse FIB lookup (daddr/saddr swapped relative to the
 * forwarding direction is NOT done here — the packet's own addresses
 * are used as the key) determines the preferred source, falling back
 * to an address selection on the output device.
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct flowi4 fl4;
		struct iphdr *iph;

		iph = ip_hdr(skb);

		/* Build a flow key mirroring this packet's forwarding
		 * decision so the FIB can report its prefsrc. */
		memset(&fl4, 0, sizeof(fl4));
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;
		fl4.flowi4_tos = RT_TOS(iph->tos);
		fl4.flowi4_oif = rt->dst.dev->ifindex;
		fl4.flowi4_iif = skb->dev->ifindex;
		fl4.flowi4_mark = skb->mark;

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
		else
			src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
					RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}
1785
Patrick McHardyc7066f72011-01-14 13:36:42 +01001786#ifdef CONFIG_IP_ROUTE_CLASSID
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787static void set_class_tag(struct rtable *rt, u32 tag)
1788{
Changli Gaod8d1f302010-06-10 23:31:35 -07001789 if (!(rt->dst.tclassid & 0xFFFF))
1790 rt->dst.tclassid |= tag & 0xFFFF;
1791 if (!(rt->dst.tclassid & 0xFFFF0000))
1792 rt->dst.tclassid |= tag & 0xFFFF0000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793}
1794#endif
1795
David S. Miller0dbaee32010-12-13 12:52:14 -08001796static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1797{
1798 unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1799
1800 if (advmss == 0) {
1801 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1802 ip_rt_min_advmss);
1803 if (advmss > 65535 - 40)
1804 advmss = 65535 - 40;
1805 }
1806 return advmss;
1807}
1808
David S. Millerd33e4552010-12-14 13:01:14 -08001809static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
1810{
1811 unsigned int mtu = dst->dev->mtu;
1812
1813 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1814 const struct rtable *rt = (const struct rtable *) dst;
1815
1816 if (rt->rt_gateway != rt->rt_dst && mtu > 576)
1817 mtu = 576;
1818 }
1819
1820 if (mtu > IP_MAX_MTU)
1821 mtu = IP_MAX_MTU;
1822
1823 return mtu;
1824}
1825
/*
 * rt_init_metrics - wire up the metrics array for a new cached route.
 *
 * @fl4: flow key (may be NULL); FLOWI_FLAG_PRECOW_METRICS requests
 *       that a peer entry be created so metrics can be written.
 * @fi:  fib_info the route was built from (callers pass non-NULL).
 *
 * With a peer, the route uses the peer's (possibly freshly seeded)
 * metrics and immediately adopts any PMTU/redirect state already
 * learned for the destination.  Without one, it shares the fib_info's
 * metrics read-only, taking a reference on @fi when the metrics are
 * not the global defaults.
 */
static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
			    struct fib_info *fi)
{
	struct inet_peer *peer;
	int create = 0;

	/* If a peer entry exists for this destination, we must hook
	 * it up in order to get at cached metrics.
	 */
	if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
		create = 1;

	rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
	if (peer) {
		rt->rt_peer_genid = rt_peer_genid();
		/* Seed the peer's metrics from the fib_info the first
		 * time this peer is used. */
		if (inet_metrics_new(peer))
			memcpy(peer->metrics, fi->fib_metrics,
			       sizeof(u32) * RTAX_MAX);
		dst_init_metrics(&rt->dst, peer->metrics, false);

		check_peer_pmtu(&rt->dst, peer);
		if (peer->redirect_learned.a4 &&
		    peer->redirect_learned.a4 != rt->rt_gateway) {
			rt->rt_gateway = peer->redirect_learned.a4;
			rt->rt_flags |= RTCF_REDIRECTED;
		}
	} else {
		if (fi->fib_metrics != (u32 *) dst_default_metrics) {
			rt->fi = fi;
			atomic_inc(&fi->fib_clntref);
		}
		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
	}
}
1860
/*
 * rt_set_nexthop - finish initializing a freshly built cached route:
 * gateway, metrics, and (when configured) classification tags.
 *
 * @fi may be NULL (e.g. host routes with no fib_info); in that case
 * only the metric clamps and class tags are applied.  The MTU and
 * ADVMSS metrics are clamped to protocol limits afterwards regardless.
 */
static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
			   const struct fib_result *res,
			   struct fib_info *fi, u16 type, u32 itag)
{
	struct dst_entry *dst = &rt->dst;

	if (fi) {
		/* Only adopt the FIB gateway for directly-linked
		 * next hops. */
		if (FIB_RES_GW(*res) &&
		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = FIB_RES_GW(*res);
		rt_init_metrics(rt, fl4, fi);
#ifdef CONFIG_IP_ROUTE_CLASSID
		dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
	}

	/* Clamp metrics to hard protocol limits. */
	if (dst_mtu(dst) > IP_MAX_MTU)
		dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
	if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
		dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, fib_rules_tclass(res));
#endif
	set_class_tag(rt, itag);
#endif
}
1889
David S. Miller5c1e6aa2011-04-28 14:13:38 -07001890static struct rtable *rt_dst_alloc(struct net_device *dev,
1891 bool nopolicy, bool noxfrm)
David S. Miller0c4dcd52011-02-17 15:42:37 -08001892{
David S. Miller5c1e6aa2011-04-28 14:13:38 -07001893 return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
1894 DST_HOST |
1895 (nopolicy ? DST_NOPOLICY : 0) |
1896 (noxfrm ? DST_NOXFRM : 0));
David S. Miller0c4dcd52011-02-17 15:42:37 -08001897}
1898
/*
 * ip_route_input_mc - build and hash an input route for a multicast
 * destination.  Called in an rcu_read_lock() section.
 *
 * @our: nonzero when the local host has joined @daddr on @dev, in
 *       which case the packet is also delivered locally.
 * Returns 0 on success or a negative errno.
 */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	unsigned int hash;
	struct rtable *rth;
	__be32 spec_dst;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	u32 itag = 0;
	int err;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	/* Source must be a valid unicast address and the frame IP. */
	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		/* 0.0.0.0 sources are only acceptable for link-local
		 * multicast groups (e.g. DHCP-style senders). */
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
					  &itag);
		if (err < 0)
			goto e_err;
	}
	rth = rt_dst_alloc(init_net.loopback_dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
	if (!rth)
		goto e_nobufs;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	/* Input routes must never be used for output; ip_rt_bug()
	 * drops and WARNs if one ever is. */
	rth->dst.output = ip_rt_bug;

	rth->rt_key_dst = daddr;
	rth->rt_key_src = saddr;
	rth->rt_genid = rt_genid(dev_net(dev));
	rth->rt_flags = RTCF_MULTICAST;
	rth->rt_type = RTN_MULTICAST;
	rth->rt_key_tos = tos;
	rth->rt_dst = daddr;
	rth->rt_src = saddr;
	rth->rt_route_iif = dev->ifindex;
	rth->rt_iif = dev->ifindex;
	rth->rt_oif = 0;
	rth->rt_mark = skb->mark;
	rth->rt_gateway = daddr;
	rth->rt_spec_dst= spec_dst;
	rth->rt_peer_genid = 0;
	rth->peer = NULL;
	rth->fi = NULL;
	if (our) {
		rth->dst.input= ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	/* Non-link-local groups with multicast forwarding enabled go
	 * through the multicast routing engine instead. */
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
	rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
	return IS_ERR(rth) ? PTR_ERR(rth) : 0;

e_nobufs:
	return -ENOBUFS;
e_inval:
	return -EINVAL;
e_err:
	return err;
}
1978
1979
/*
 * ip_handle_martian_source - account (and, with log_martians enabled,
 * log) a packet whose source address failed validation.
 *
 * Besides the addresses, the link-layer header bytes are dumped in
 * hex, since with a spoofed source the MAC header is the only hint to
 * where the packet really came from.
 */
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 * RFC1812 recommendation, if source is martian,
		 * the only hint is MAC header.
		 */
		printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			int i;
			const unsigned char *p = skb_mac_header(skb);
			printk(KERN_WARNING "ll header: ");
			for (i = 0; i < dev->hard_header_len; i++, p++) {
				printk("%02x", *p);
				if (i < (dev->hard_header_len - 1))
					printk(":");
			}
			printk("\n");
		}
	}
#endif
}
2009
/*
 * __mkroute_input - build a forwarding (input) route from a FIB lookup
 * result.  Called in an rcu_read_lock() section.
 *
 * Validates the source against the FIB (reverse-path), decides whether
 * an ICMP redirect should later be sent (RTCF_DOREDIRECT), rejects
 * non-IP frames that would be invalid for proxy ARP, then allocates
 * and fills the rtable.  On success *@result holds the new route and
 * 0 is returned; otherwise a negative errno.
 */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos,
			   struct rtable **result)
{
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	unsigned int flags = 0;
	__be32 spec_dst;
	u32 itag;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		if (net_ratelimit())
			printk(KERN_CRIT "Bug in ip_route_input" \
			       "_slow(). Please, report\n");
		return -EINVAL;
	}


	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, &spec_dst, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	/* err > 0 means the source is reachable through this device
	 * but was not the best match. */
	if (err)
		flags |= RTCF_DIRECTSRC;

	/* Packet leaves the way it came in: candidate for a redirect
	 * if the medium is shared or the sender is on-link with the
	 * gateway. */
	if (out_dev == in_dev && err &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		flags |= RTCF_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * Proxy arp feature have been extended to allow, ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	rth = rt_dst_alloc(out_dev->dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM));
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_key_dst = daddr;
	rth->rt_key_src = saddr;
	rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
	rth->rt_flags = flags;
	rth->rt_type = res->type;
	rth->rt_key_tos = tos;
	rth->rt_dst = daddr;
	rth->rt_src = saddr;
	rth->rt_route_iif = in_dev->dev->ifindex;
	rth->rt_iif = in_dev->dev->ifindex;
	rth->rt_oif = 0;
	rth->rt_mark = skb->mark;
	rth->rt_gateway = daddr;
	rth->rt_spec_dst= spec_dst;
	rth->rt_peer_genid = 0;
	rth->peer = NULL;
	rth->fi = NULL;

	rth->dst.input = ip_forward;
	rth->dst.output = ip_output;

	rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);

	*result = rth;
	err = 0;
 cleanup:
	return err;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102
Stephen Hemminger5969f712008-04-10 01:52:09 -07002103static int ip_mkroute_input(struct sk_buff *skb,
2104 struct fib_result *res,
David S. Miller68a5e3d2011-03-11 20:07:33 -05002105 const struct flowi4 *fl4,
Stephen Hemminger5969f712008-04-10 01:52:09 -07002106 struct in_device *in_dev,
2107 __be32 daddr, __be32 saddr, u32 tos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108{
Chuck Short7abaa272005-06-22 22:10:23 -07002109 struct rtable* rth = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 int err;
2111 unsigned hash;
2112
2113#ifdef CONFIG_IP_ROUTE_MULTIPATH
David S. Millerff3fccb2011-03-10 16:23:24 -08002114 if (res->fi && res->fi->fib_nhs > 1)
David S. Miller1b7fe5932011-03-10 17:01:16 -08002115 fib_select_multipath(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116#endif
2117
2118 /* create a routing cache entry */
2119 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2120 if (err)
2121 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122
2123 /* put it into the cache */
David S. Miller68a5e3d2011-03-11 20:07:33 -05002124 hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
Changli Gaod8d1f302010-06-10 23:31:35 -07002125 rt_genid(dev_net(rth->dst.dev)));
David S. Miller68a5e3d2011-03-11 20:07:33 -05002126 rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
David S. Millerb23dd4f2011-03-02 14:31:35 -08002127 if (IS_ERR(rth))
2128 return PTR_ERR(rth);
2129 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130}
2131
/*
 *	NOTE. We drop all packets that have a local source
 *	address, because every properly looped-back packet
 *	must already have the correct destination attached by the
 *	output routine.
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with a 100% guarantee.
 *	called with rcu_read_lock()
 */
2142
Al Viro9e12bb22006-09-26 21:25:20 -07002143static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 u8 tos, struct net_device *dev)
2145{
2146 struct fib_result res;
Eric Dumazet96d36222010-06-02 19:21:31 +00002147 struct in_device *in_dev = __in_dev_get_rcu(dev);
David S. Miller68a5e3d2011-03-11 20:07:33 -05002148 struct flowi4 fl4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149 unsigned flags = 0;
2150 u32 itag = 0;
2151 struct rtable * rth;
2152 unsigned hash;
Al Viro9e12bb22006-09-26 21:25:20 -07002153 __be32 spec_dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 int err = -EINVAL;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002155 struct net * net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156
2157 /* IP on this device is disabled. */
2158
2159 if (!in_dev)
2160 goto out;
2161
2162 /* Check for the most weird martians, which can be not detected
2163 by fib_lookup.
2164 */
2165
Jan Engelhardt1e637c72008-01-21 03:18:08 -08002166 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
Joe Perchesf97c1e02007-12-16 13:45:43 -08002167 ipv4_is_loopback(saddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 goto martian_source;
2169
Andy Walls27a954b2010-10-17 15:11:22 +00002170 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 goto brd_input;
2172
2173 /* Accept zero addresses only to limited broadcast;
2174 * I even do not know to fix it or not. Waiting for complains :-)
2175 */
Joe Perchesf97c1e02007-12-16 13:45:43 -08002176 if (ipv4_is_zeronet(saddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177 goto martian_source;
2178
Andy Walls27a954b2010-10-17 15:11:22 +00002179 if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 goto martian_destination;
2181
2182 /*
2183 * Now we are ready to route packet.
2184 */
David S. Miller68a5e3d2011-03-11 20:07:33 -05002185 fl4.flowi4_oif = 0;
2186 fl4.flowi4_iif = dev->ifindex;
2187 fl4.flowi4_mark = skb->mark;
2188 fl4.flowi4_tos = tos;
2189 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2190 fl4.daddr = daddr;
2191 fl4.saddr = saddr;
2192 err = fib_lookup(net, &fl4, &res);
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00002193 if (err != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 if (!IN_DEV_FORWARD(in_dev))
Dietmar Eggemann2c2910a2005-06-28 13:06:23 -07002195 goto e_hostunreach;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 goto no_route;
2197 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198
2199 RT_CACHE_STAT_INC(in_slow_tot);
2200
2201 if (res.type == RTN_BROADCAST)
2202 goto brd_input;
2203
2204 if (res.type == RTN_LOCAL) {
Michael Smith5c04c812011-04-07 04:51:50 +00002205 err = fib_validate_source(skb, saddr, daddr, tos,
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00002206 net->loopback_dev->ifindex,
Michael Smith5c04c812011-04-07 04:51:50 +00002207 dev, &spec_dst, &itag);
Eric Dumazetb5f7e752010-06-02 12:05:27 +00002208 if (err < 0)
2209 goto martian_source_keep_err;
2210 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 flags |= RTCF_DIRECTSRC;
2212 spec_dst = daddr;
2213 goto local_input;
2214 }
2215
2216 if (!IN_DEV_FORWARD(in_dev))
Dietmar Eggemann2c2910a2005-06-28 13:06:23 -07002217 goto e_hostunreach;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 if (res.type != RTN_UNICAST)
2219 goto martian_destination;
2220
David S. Miller68a5e3d2011-03-11 20:07:33 -05002221 err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222out: return err;
2223
2224brd_input:
2225 if (skb->protocol != htons(ETH_P_IP))
2226 goto e_inval;
2227
Joe Perchesf97c1e02007-12-16 13:45:43 -08002228 if (ipv4_is_zeronet(saddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2230 else {
Michael Smith5c04c812011-04-07 04:51:50 +00002231 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2232 &itag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 if (err < 0)
Eric Dumazetb5f7e752010-06-02 12:05:27 +00002234 goto martian_source_keep_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 if (err)
2236 flags |= RTCF_DIRECTSRC;
2237 }
2238 flags |= RTCF_BROADCAST;
2239 res.type = RTN_BROADCAST;
2240 RT_CACHE_STAT_INC(in_brd);
2241
2242local_input:
David S. Miller5c1e6aa2011-04-28 14:13:38 -07002243 rth = rt_dst_alloc(net->loopback_dev,
2244 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 if (!rth)
2246 goto e_nobufs;
2247
David S. Millercf911662011-04-28 14:31:47 -07002248 rth->dst.input= ip_local_deliver;
Changli Gaod8d1f302010-06-10 23:31:35 -07002249 rth->dst.output= ip_rt_bug;
David S. Millercf911662011-04-28 14:31:47 -07002250#ifdef CONFIG_IP_ROUTE_CLASSID
2251 rth->dst.tclassid = itag;
2252#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253
David S. Miller5e2b61f2011-03-04 21:47:09 -08002254 rth->rt_key_dst = daddr;
David S. Miller5e2b61f2011-03-04 21:47:09 -08002255 rth->rt_key_src = saddr;
David S. Millercf911662011-04-28 14:31:47 -07002256 rth->rt_genid = rt_genid(net);
2257 rth->rt_flags = flags|RTCF_LOCAL;
2258 rth->rt_type = res.type;
David S. Miller475949d2011-05-03 19:45:15 -07002259 rth->rt_key_tos = tos;
David S. Millercf911662011-04-28 14:31:47 -07002260 rth->rt_dst = daddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 rth->rt_src = saddr;
Patrick McHardyc7066f72011-01-14 13:36:42 +01002262#ifdef CONFIG_IP_ROUTE_CLASSID
Changli Gaod8d1f302010-06-10 23:31:35 -07002263 rth->dst.tclassid = itag;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264#endif
OGAWA Hirofumi1b86a582011-04-07 14:04:08 -07002265 rth->rt_route_iif = dev->ifindex;
David S. Miller5e2b61f2011-03-04 21:47:09 -08002266 rth->rt_iif = dev->ifindex;
David S. Millercf911662011-04-28 14:31:47 -07002267 rth->rt_oif = 0;
2268 rth->rt_mark = skb->mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 rth->rt_gateway = daddr;
2270 rth->rt_spec_dst= spec_dst;
David S. Millercf911662011-04-28 14:31:47 -07002271 rth->rt_peer_genid = 0;
2272 rth->peer = NULL;
2273 rth->fi = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 if (res.type == RTN_UNREACHABLE) {
Changli Gaod8d1f302010-06-10 23:31:35 -07002275 rth->dst.input= ip_error;
2276 rth->dst.error= -err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 rth->rt_flags &= ~RTCF_LOCAL;
2278 }
David S. Miller68a5e3d2011-03-11 20:07:33 -05002279 hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2280 rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
David S. Millerb23dd4f2011-03-02 14:31:35 -08002281 err = 0;
2282 if (IS_ERR(rth))
2283 err = PTR_ERR(rth);
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00002284 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285
2286no_route:
2287 RT_CACHE_STAT_INC(in_no_route);
2288 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2289 res.type = RTN_UNREACHABLE;
Mitsuru Chinen7f538782007-12-07 01:07:24 -08002290 if (err == -ESRCH)
2291 err = -ENETUNREACH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 goto local_input;
2293
2294 /*
2295 * Do not cache martian addresses: they should be logged (RFC1812)
2296 */
2297martian_destination:
2298 RT_CACHE_STAT_INC(in_martian_dst);
2299#ifdef CONFIG_IP_ROUTE_VERBOSE
2300 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
Harvey Harrison673d57e2008-10-31 00:53:57 -07002301 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2302 &daddr, &saddr, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303#endif
Dietmar Eggemann2c2910a2005-06-28 13:06:23 -07002304
2305e_hostunreach:
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002306 err = -EHOSTUNREACH;
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00002307 goto out;
Dietmar Eggemann2c2910a2005-06-28 13:06:23 -07002308
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309e_inval:
2310 err = -EINVAL;
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00002311 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312
2313e_nobufs:
2314 err = -ENOBUFS;
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00002315 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316
2317martian_source:
Eric Dumazetb5f7e752010-06-02 12:05:27 +00002318 err = -EINVAL;
2319martian_source_keep_err:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00002321 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322}
2323
/*
 * ip_route_input_common - input route lookup entry point.
 * @skb:   packet being routed
 * @daddr: destination address from the IP header
 * @saddr: source address from the IP header
 * @tos:   type of service byte (masked with IPTOS_RT_MASK here)
 * @dev:   device the packet arrived on
 * @noref: if true, attach the dst to the skb without taking a reference
 *
 * Fast path: probe the route cache hash chain under rcu_read_lock().
 * On a miss, multicast destinations are dispatched to ip_route_input_mc()
 * and everything else goes through ip_route_input_slow().
 *
 * Returns 0 on success or a negative errno.
 */
int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			   u8 tos, struct net_device *dev, bool noref)
{
	struct rtable * rth;
	unsigned	hash;
	int iif = dev->ifindex;
	struct net *net;
	int res;

	net = dev_net(dev);

	rcu_read_lock();

	if (!rt_caching(net))
		goto skip_cache;

	tos &= IPTOS_RT_MASK;
	hash = rt_hash(daddr, saddr, iif, rt_genid(net));

	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->dst.rt_next)) {
		/* XOR the key fields and OR the results so the whole
		 * comparison collapses into a single branch; only an exact
		 * match of dst, src, iif and tos yields zero.
		 */
		if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
		     ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
		     (rth->rt_route_iif ^ iif) |
		     (rth->rt_key_tos ^ tos)) == 0 &&
		    rth->rt_mark == skb->mark &&
		    net_eq(dev_net(rth->dst.dev), net) &&
		    !rt_is_expired(rth)) {
			if (noref) {
				/* Caller guarantees skb outlives the dst use;
				 * skip the refcount for speed.
				 */
				dst_use_noref(&rth->dst, jiffies);
				skb_dst_set_noref(skb, &rth->dst);
			} else {
				dst_use(&rth->dst, jiffies);
				skb_dst_set(skb, &rth->dst);
			}
			RT_CACHE_STAT_INC(in_hit);
			rcu_read_unlock();
			return 0;
		}
		RT_CACHE_STAT_INC(in_hlist_search);
	}

skip_cache:
	/* Multicast recognition logic is moved from route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As result the host on multicasting
	   network acquires a lot of useless route cache entries, sort of
	   SDR messages from all the world. Now we try to get rid of them.
	   Really, provided software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   comparing with route cache reject entries.
	   Note, that multicast routers are not affected, because
	   route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			int our = ip_check_mc_rcu(in_dev, daddr, saddr,
						  ip_hdr(skb)->protocol);
			if (our
#ifdef CONFIG_IP_MROUTE
				||
			    (!ipv4_is_local_multicast(daddr) &&
			     IN_DEV_MFORWARD(in_dev))
#endif
			   ) {
				int res = ip_route_input_mc(skb, daddr, saddr,
							    tos, dev, our);
				rcu_read_unlock();
				return res;
			}
		}
		rcu_read_unlock();
		return -EINVAL;
	}
	res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(ip_route_input_common);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405
/*
 * __mkroute_output - build an output rtable entry from a FIB result.
 *
 * Classifies the destination (broadcast/multicast/unicast), allocates
 * and fills the rtable, wiring dst.input/dst.output according to the
 * route type, then lets rt_set_nexthop() finish the entry.
 *
 * Returns the new rtable or an ERR_PTR() on failure.
 * called with rcu_read_lock()
 */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4,
				       __be32 orig_daddr, __be32 orig_saddr,
				       int orig_oif, struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	u32 tos = RT_FL_TOS(fl4);
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;

	/* A loopback source may only leave via a loopback device. */
	if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
		return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;	/* no next-hop info for broadcast */
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		/* If multicast route do not exist use
		 * default one, but do not gateway in this case.
		 * Yes, it is hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	}

	rth = rt_dst_alloc(dev_out,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM));
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->dst.output = ip_output;

	/* Cache key holds the pre-resolution addresses/oif so later
	 * lookups with the same original flow hit this entry.
	 */
	rth->rt_key_dst	= orig_daddr;
	rth->rt_key_src	= orig_saddr;
	rth->rt_genid = rt_genid(dev_net(dev_out));
	rth->rt_flags	= flags;
	rth->rt_type	= type;
	rth->rt_key_tos	= tos;
	rth->rt_dst	= fl4->daddr;
	rth->rt_src	= fl4->saddr;
	rth->rt_route_iif = 0;	/* output route: no input interface */
	rth->rt_iif	= orig_oif ? : dev_out->ifindex;
	rth->rt_oif	= orig_oif;
	rth->rt_mark    = fl4->flowi4_mark;
	rth->rt_gateway = fl4->daddr;
	rth->rt_spec_dst= fl4->saddr;
	rth->rt_peer_genid = 0;
	rth->peer = NULL;
	rth->fi = NULL;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL) {
		rth->dst.input = ip_local_deliver;
		rth->rt_spec_dst = fl4->daddr;
	}
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		rth->rt_spec_dst = fl4->saddr;
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4, res, fi, type, 0);

	return rth;
}
2506
/*
 *	Major route resolver routine.
 *	Takes rcu_read_lock() internally for the duration of the lookup.
 */
2511
/*
 * ip_route_output_slow - resolve an output route on a cache miss.
 * @net: network namespace to resolve in
 * @fl4: flow key; fields (saddr, daddr, oif, scope, ...) are filled in
 *       and rewritten as the route is resolved (the original daddr,
 *       saddr and oif are saved first for the cache key)
 *
 * Validates the source address, picks the output device, performs the
 * FIB lookup (with special handling of multicast, limited broadcast and
 * local destinations), then builds and caches the entry via
 * __mkroute_output()/rt_intern_hash().
 *
 * Returns the rtable or an ERR_PTR() on failure.
 */
static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
{
	struct net_device *dev_out = NULL;
	u32 tos	= RT_FL_TOS(fl4);
	unsigned int flags = 0;
	struct fib_result res;
	struct rtable *rth;
	__be32 orig_daddr;
	__be32 orig_saddr;
	int orig_oif;

	res.fi		= NULL;
#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r		= NULL;
#endif

	/* Save the caller's flow key: the cache entry is keyed on it even
	 * though fl4 is rewritten below.
	 */
	orig_daddr = fl4->daddr;
	orig_saddr = fl4->saddr;
	orig_oif = fl4->flowi4_oif;

	fl4->flowi4_iif = net->loopback_dev->ifindex;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	if (fl4->saddr) {
		rth = ERR_PTR(-EINVAL);
		/* Reject impossible source addresses outright. */
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr))
			goto out;

		/* I removed check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (dev_out == NULL)
				goto out;

			/* Special hack: user can direct multicasts
			   and limited broadcast via necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic,vat and friends to work.
			   They bind socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of routing cache they are broken,
			   because we are not allowed to build multicast path
			   with loopback source addr (look, routing cache
			   cannot know, that ttl is zero, so that packet
			   will not leave this host and route is valid).
			   Luckily, this hack is good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}


	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr)) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		/* No destination at all: loop the packet back locally. */
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = net->loopback_dev->ifindex;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(net, fl4, &res)) {
		res.fi = NULL;
		if (fl4->flowi4_oif) {
			/* Apparently, routing tables are wrong. Assume,
			   that the destination is on link.

			   WHY? DW.
			   Because we are allowed to send to iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if destination is gatewayed, rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send packet, ignoring both routing tables
			   and ifaddr state. --ANK


			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(-ENETUNREACH);
		goto out;
	}

	if (res.type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res.fi->fib_prefsrc)
				fl4->saddr = res.fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = dev_out->ifindex;
		res.fi = NULL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
		fib_select_multipath(&res);
	else
#endif
	if (!res.prefixlen &&
	    res.table->tb_num_default > 1 &&
	    res.type == RTN_UNICAST && !fl4->flowi4_oif)
		fib_select_default(&res);

	if (!fl4->saddr)
		fl4->saddr = FIB_RES_PREFSRC(net, res);

	dev_out = FIB_RES_DEV(res);
	fl4->flowi4_oif = dev_out->ifindex;


make_route:
	rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
			       dev_out, flags);
	if (!IS_ERR(rth)) {
		unsigned int hash;

		/* Key the cache on the original (pre-rewrite) flow. */
		hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
			       rt_genid(dev_net(dev_out)));
		rth = rt_intern_hash(hash, rth, NULL, orig_oif);
	}

out:
	rcu_read_unlock();
	return rth;
}
2704
/*
 * Resolve an IPv4 output route for @flp4.  The routing cache is probed
 * first (under rcu_read_lock_bh, since output routes are also inserted
 * from BH context); on a miss, or when caching is disabled, fall back
 * to the slow path which consults the FIB.
 *
 * On a cache hit, wildcard fields of the flow (saddr/daddr == 0) are
 * filled in from the cached route before returning.
 *
 * Returns a referenced struct rtable or an ERR_PTR() from the slow path.
 */
struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
{
	struct rtable *rth;
	unsigned int hash;

	if (!rt_caching(net))
		goto slow_output;

	hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));

	rcu_read_lock_bh();
	for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
		rth = rcu_dereference_bh(rth->dst.rt_next)) {
		/* All key fields must match; TOS is compared only in the
		 * bits that matter for routing (IPTOS_RT_MASK) plus the
		 * RTO_ONLINK flag. */
		if (rth->rt_key_dst == flp4->daddr &&
		    rth->rt_key_src == flp4->saddr &&
		    rt_is_output_route(rth) &&
		    rth->rt_oif == flp4->flowi4_oif &&
		    rth->rt_mark == flp4->flowi4_mark &&
		    !((rth->rt_key_tos ^ flp4->flowi4_tos) &
			    (IPTOS_RT_MASK | RTO_ONLINK)) &&
		    net_eq(dev_net(rth->dst.dev), net) &&
		    !rt_is_expired(rth)) {
			/* Take a reference and bump last-use time before
			 * dropping the RCU read lock. */
			dst_use(&rth->dst, jiffies);
			RT_CACHE_STAT_INC(out_hit);
			rcu_read_unlock_bh();
			if (!flp4->saddr)
				flp4->saddr = rth->rt_src;
			if (!flp4->daddr)
				flp4->daddr = rth->rt_dst;
			return rth;
		}
		RT_CACHE_STAT_INC(out_hlist_search);
	}
	rcu_read_unlock_bh();

slow_output:
	return ip_route_output_slow(net, flp4);
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
2744
/*
 * dst_ops->check for blackhole routes: always report the entry as
 * invalid (NULL) so users holding a cached reference re-resolve it.
 */
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}
2749
/*
 * dst_ops->default_mtu for blackhole routes: no packets are ever sent
 * through a blackhole dst, so report an MTU of zero.
 */
static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
{
	return 0;
}
2754
/* dst_ops->update_pmtu for blackhole routes: intentionally a no-op. */
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
2758
/*
 * dst_ops->cow_metrics for blackhole routes: refuse to allocate a
 * writable metrics block; the metrics of a blackhole dst stay read-only.
 */
static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}
2764
/*
 * dst_ops for blackhole routes created by ipv4_blackhole_route().
 * Most callbacks are inert stubs; destroy/advmss are shared with the
 * regular IPv4 dst ops.
 */
static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			=	AF_INET,
	.protocol		=	cpu_to_be16(ETH_P_IP),
	.destroy		=	ipv4_dst_destroy,
	.check			=	ipv4_blackhole_dst_check,
	.default_mtu		=	ipv4_blackhole_default_mtu,
	.default_advmss		=	ipv4_default_advmss,
	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
	.cow_metrics		=	ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		=	ipv4_neigh_lookup,
};
2776
/*
 * Build a "blackhole" replacement for @dst_orig: a dst that copies the
 * original route's identity (keys, flags, gateway, peer, fib info) but
 * discards every packet sent through it.  Used e.g. while an xfrm
 * bundle is being resolved.
 *
 * Consumes the caller's reference on @dst_orig.  Returns the new dst,
 * or ERR_PTR(-ENOMEM) if allocation failed.
 */
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
	struct rtable *ort = (struct rtable *) dst_orig;

	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		/* Both directions drop packets. */
		new->input = dst_discard;
		new->output = dst_discard;
		dst_copy_metrics(new, &ort->dst);

		new->dev = ort->dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		/* Mirror the original route's lookup keys ... */
		rt->rt_key_dst = ort->rt_key_dst;
		rt->rt_key_src = ort->rt_key_src;
		rt->rt_key_tos = ort->rt_key_tos;
		rt->rt_route_iif = ort->rt_route_iif;
		rt->rt_iif = ort->rt_iif;
		rt->rt_oif = ort->rt_oif;
		rt->rt_mark = ort->rt_mark;

		/* ... and its resolved state. */
		rt->rt_genid = rt_genid(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_dst = ort->rt_dst;
		rt->rt_src = ort->rt_src;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_spec_dst = ort->rt_spec_dst;
		/* Shared peer/fib-info need their own reference counts bumped. */
		rt->peer = ort->peer;
		if (rt->peer)
			atomic_inc(&rt->peer->refcnt);
		rt->fi = ort->fi;
		if (rt->fi)
			atomic_inc(&rt->fi->fib_clntref);

		/* NOTE(review): dst_free() here pairs with dst_alloc();
		 * the entry is not hashed and lives only through the
		 * references callers hold — confirm against dst core. */
		dst_free(new);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
2823
David S. Miller9d6ec932011-03-12 01:12:47 -05002824struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
David S. Millerb23dd4f2011-03-02 14:31:35 -08002825 struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826{
David S. Miller9d6ec932011-03-12 01:12:47 -05002827 struct rtable *rt = __ip_route_output_key(net, flp4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828
David S. Millerb23dd4f2011-03-02 14:31:35 -08002829 if (IS_ERR(rt))
2830 return rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831
David S. Miller56157872011-05-02 14:37:45 -07002832 if (flp4->flowi4_proto)
David S. Miller9d6ec932011-03-12 01:12:47 -05002833 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2834 flowi4_to_flowi(flp4),
2835 sk, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836
David S. Millerb23dd4f2011-03-02 14:31:35 -08002837 return rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838}
Arnaldo Carvalho de Melod8c97a92005-08-09 20:12:12 -07002839EXPORT_SYMBOL_GPL(ip_route_output_flow);
2840
/*
 * Fill a netlink RTM_* message describing the route attached to @skb
 * (skb_rtable).  Used both for RTM_GETROUTE replies and cache dumps.
 *
 * @nowait: passed through to ipmr_get_route() for multicast resolution.
 * Returns the length from nlmsg_end() on success, 0 if ipmr deferred,
 * or -EMSGSIZE if the message did not fit (message is cancelled).
 */
static int rt_fill_info(struct net *net,
			struct sk_buff *skb, u32 pid, u32 seq, int event,
			int nowait, unsigned int flags)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires = 0;
	const struct inet_peer *peer = rt->peer;
	u32 id = 0, ts = 0, tsage = 0, error;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	/* Fixed rtmsg header: cached routes are always reported as
	 * host routes (/32) from the main table, RTM_F_CLONED. */
	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= rt->rt_key_tos;
	r->rtm_table	= RT_TABLE_MAIN;
	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	/* NLA_PUT_* macros jump to nla_put_failure when out of room. */
	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);

	if (rt->rt_key_src) {
		r->rtm_src_len = 32;
		NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
	}
	if (rt->dst.dev)
		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid)
		NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
#endif
	/* Preferred source: spec_dst for input routes, otherwise the
	 * chosen source when it differs from the flow key. */
	if (rt_is_input_route(rt))
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
	else if (rt->rt_src != rt->rt_key_src)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);

	if (rt->rt_dst != rt->rt_gateway)
		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);

	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
		goto nla_put_failure;

	if (rt->rt_mark)
		NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);

	/* Cacheinfo fields sourced from the inet_peer, when present:
	 * IP id counter, TCP timestamp echo data and PMTU expiry. */
	error = rt->dst.error;
	if (peer) {
		inet_peer_refcheck(rt->peer);
		id = atomic_read(&peer->ip_id_count) & 0xffff;
		if (peer->tcp_ts_stamp) {
			ts = peer->tcp_ts;
			tsage = get_seconds() - peer->tcp_ts_stamp;
		}
		expires = ACCESS_ONCE(peer->pmtu_expires);
		if (expires)
			expires -= jiffies;
	}

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		__be32 dst = rt->rt_dst;

		/* Forwarded multicast is resolved via the mroute code;
		 * it may return 0 ("reply deferred") in blocking mode. */
		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 rt->rt_src, rt->rt_dst,
						 r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
			       expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2944
/*
 * RTM_GETROUTE handler: resolve the route for the dst/src/iif/oif/tos/
 * mark attributes supplied over netlink and unicast an RTM_NEWROUTE
 * reply back to the requester.
 *
 * With RTA_IIF set the input path (ip_route_input) is exercised using a
 * dummy skb; otherwise a normal output lookup is performed.
 */
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through good chunk of routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	/* Optional attributes default to zero when absent. */
	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		skb->mark	= mark;
		/* ip_route_input() expects BH-disabled context. */
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		/* A "successful" lookup may still carry a dst error
		 * (e.g. unreachable); surface it as a negative errno. */
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		struct flowi4 fl4 = {
			.daddr = dst,
			.saddr = src,
			.flowi4_tos = rtm->rtm_tos,
			.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
			.flowi4_mark = mark,
		};
		rt = ip_route_output_key(net, &fl4);

		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
	}

	if (err)
		goto errout_free;

	/* Attach the route; the skb now owns a reference to it. */
	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	/* rtnl_unicast() consumes the skb on both success and failure. */
	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
3039
/*
 * Netlink dump callback: walk the whole route cache and emit one
 * RTM_NEWROUTE message per live entry belonging to this netns.
 *
 * Resumption state is kept in cb->args[0] (hash bucket) and
 * cb->args[1] (index within the bucket's chain).
 */
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtable *rt;
	int h, s_h;
	int idx, s_idx;
	struct net *net;

	net = sock_net(skb->sk);

	s_h = cb->args[0];
	if (s_h < 0)
		s_h = 0;
	s_idx = idx = cb->args[1];
	for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
		if (!rt_hash_table[h].chain)
			continue;
		rcu_read_lock_bh();
		for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
		     rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
			/* Skip foreign-netns entries and entries already
			 * dumped in a previous pass (idx < s_idx). */
			if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
				continue;
			if (rt_is_expired(rt))
				continue;
			/* Borrow the dst without refcounting; we stay
			 * inside the RCU-bh critical section. */
			skb_dst_set_noref(skb, &rt->dst);
			if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					 1, NLM_F_MULTI) <= 0) {
				/* Out of room: record position and stop. */
				skb_dst_drop(skb);
				rcu_read_unlock_bh();
				goto done;
			}
			skb_dst_drop(skb);
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}
3081
/*
 * Multicast configuration on @in_dev changed: flush this netns's
 * route cache immediately (delay 0) so stale entries are dropped.
 */
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev), 0);
}
3086
3087#ifdef CONFIG_SYSCTL
Denis V. Lunev81c684d2008-07-08 03:05:28 -07003088static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07003089 void __user *buffer,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090 size_t *lenp, loff_t *ppos)
3091{
3092 if (write) {
Denis V. Lunev639e1042008-07-05 19:02:06 -07003093 int flush_delay;
Denis V. Lunev81c684d2008-07-08 03:05:28 -07003094 ctl_table ctl;
Denis V. Lunev39a23e72008-07-05 19:02:33 -07003095 struct net *net;
Denis V. Lunev639e1042008-07-05 19:02:06 -07003096
Denis V. Lunev81c684d2008-07-08 03:05:28 -07003097 memcpy(&ctl, __ctl, sizeof(ctl));
3098 ctl.data = &flush_delay;
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07003099 proc_dointvec(&ctl, write, buffer, lenp, ppos);
Denis V. Lunev639e1042008-07-05 19:02:06 -07003100
Denis V. Lunev81c684d2008-07-08 03:05:28 -07003101 net = (struct net *)__ctl->extra1;
Denis V. Lunev39a23e72008-07-05 19:02:33 -07003102 rt_cache_flush(net, flush_delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003103 return 0;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09003104 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105
3106 return -EINVAL;
3107}
3108
/*
 * Global (non per-netns) tunables exposed under
 * /proc/sys/net/ipv4/route/.  Interval/timeout entries use the
 * jiffies-converting handlers; the rest are plain integers.
 */
static ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */

		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
Denis V. Lunev39a23e72008-07-05 19:02:33 -07003212
/* Empty child table (sentinel-only) used as a placeholder directory. */
static struct ctl_table empty[1];

/*
 * Skeleton for /proc/sys/net/ipv4: the "route" directory points at the
 * global tunables above, "neigh" is an empty placeholder filled in by
 * the neighbour subsystem.
 */
static struct ctl_table ipv4_skeleton[] =
{
	{ .procname = "route", 
	  .mode = 0555, .child = ipv4_route_table},
	{ .procname = "neigh", 
	  .mode = 0555, .child = empty},
	{ }
};

/* Mount path for the skeleton: net/ipv4. */
static __net_initdata struct ctl_path ipv4_path[] = {
	{ .procname = "net", },
	{ .procname = "ipv4", },
	{ },
};
Denis V. Lunev39a23e72008-07-05 19:02:33 -07003229
/*
 * Per-netns write-only "flush" trigger.  Note there is no .data
 * pointer: sysctl_route_net_init() stores the owning netns in
 * extra1 and the handler parses the value itself.
 */
static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};

/* Mount path for the flush entry: net/ipv4/route. */
static __net_initdata struct ctl_path ipv4_route_path[] = {
	{ .procname = "net", },
	{ .procname = "ipv4", },
	{ .procname = "route", },
	{ },
};
3246
Denis V. Lunev39a23e72008-07-05 19:02:33 -07003247static __net_init int sysctl_route_net_init(struct net *net)
3248{
3249 struct ctl_table *tbl;
3250
3251 tbl = ipv4_route_flush_table;
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08003252 if (!net_eq(net, &init_net)) {
Denis V. Lunev39a23e72008-07-05 19:02:33 -07003253 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3254 if (tbl == NULL)
3255 goto err_dup;
3256 }
3257 tbl[0].extra1 = net;
3258
3259 net->ipv4.route_hdr =
3260 register_net_sysctl_table(net, ipv4_route_path, tbl);
3261 if (net->ipv4.route_hdr == NULL)
3262 goto err_reg;
3263 return 0;
3264
3265err_reg:
3266 if (tbl != ipv4_route_flush_table)
3267 kfree(tbl);
3268err_dup:
3269 return -ENOMEM;
3270}
3271
3272static __net_exit void sysctl_route_net_exit(struct net *net)
3273{
3274 struct ctl_table *tbl;
3275
3276 tbl = net->ipv4.route_hdr->ctl_table_arg;
3277 unregister_net_sysctl_table(net->ipv4.route_hdr);
3278 BUG_ON(tbl == ipv4_route_flush_table);
3279 kfree(tbl);
3280}
3281
/* Per-netns lifecycle hooks for the route sysctl entries. */
static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286#endif
3287
/*
 * Per-netns init: seed the route-cache generation id and the device
 * address generation id with random values so cache invalidation
 * cookies differ per namespace and per boot.
 */
static __net_init int rt_genid_init(struct net *net)
{
	get_random_bytes(&net->ipv4.rt_genid,
			 sizeof(net->ipv4.rt_genid));
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};
3300
3301
#ifdef CONFIG_IP_ROUTE_CLASSID
/* Per-CPU accounting buckets for routing classifier statistics. */
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

/* Route-cache hash size requested on the kernel command line (0 = auto). */
static __initdata unsigned long rhash_entries;

/*
 * Parse the "rhash_entries=" boot parameter.  Returns 1 when consumed,
 * 0 when @str is missing so the option is handed back to init.
 */
static int __init set_rhash_entries(char *str)
{
	if (!str)
		return 0;
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);
3315
/*
 * Boot-time initialization of the IPv4 routing layer: dst slab cache,
 * dst entry counters, the global route-cache hash table, fib/devinet
 * subsystems, procfs files, xfrm hooks, netlink handler and pernet ops.
 * Statement order matters throughout — later steps consume state set up
 * by earlier ones (see inline comments). Always returns 0.
 */
int __init ip_rt_init(void)
{
	int rc = 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
	/* 256 classid slots per cpu; unrecoverable at boot if this fails. */
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Blackhole dst entries share the same slab cache — must follow
	 * the kmem_cache_create() above. */
	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	/* Size the route cache hash: rhash_entries (boot param) wins if
	 * set; otherwise scale by available RAM (order 15 vs 17 around
	 * the 512MB boundary, 128 * 1024 pages at 4K pages), capped at
	 * 512K entries. rt_hash_log/rt_hash_mask are filled in here. */
	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(totalram_pages >= 128 * 1024) ?
					15 : 17,
					0,
					&rt_hash_log,
					&rt_hash_mask,
					rhash_entries ? 0 : 512 * 1024);
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
	rt_hash_lock_init();

	/* GC threshold and max cache size derive from the hash size just
	 * computed above. */
	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
	ip_rt_max_size = (rt_hash_mask + 1) * 16;

	devinet_init();
	ip_fib_init();

	/* proc failure is non-fatal: routing works without the files. */
	if (ip_rt_proc_init())
		printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	/* xfrm4 sizing depends on ip_rt_max_size set above. */
	xfrm4_init(ip_rt_max_size);
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	return rc;
}
3371
#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
/* Register the static ipv4 sysctl skeleton early, before the normal
 * sysctl init path runs (see the ordering complaint above). */
void __init ip_static_sysctl_init(void)
{
	register_sysctl_paths(ipv4_path, ipv4_skeleton);
}
#endif