blob: 62d4d90c1389c4ea7da37c81779b2f55207d2a92 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * ROUTE - implementation of the IP router.
7 *
Jesper Juhl02c30a82005-05-05 16:16:16 -07008 * Authors: Ross Biro
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13 *
14 * Fixes:
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +090021 * Alan Cox : Super /proc >4K
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
24 * clamper.
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +090039 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070040 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
Eric Dumazetbb1d23b2005-07-05 15:00:32 -070055 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
Ilia Sotnikovcef26852006-03-25 01:38:55 -080056 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
Linus Torvalds1da177e2005-04-16 15:20:36 -070058 *
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
63 */
64
Joe Perchesafd465032012-03-12 07:03:32 +000065#define pr_fmt(fmt) "IPv4: " fmt
66
Linus Torvalds1da177e2005-04-16 15:20:36 -070067#include <linux/module.h>
68#include <asm/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070069#include <linux/bitops.h>
70#include <linux/types.h>
71#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070072#include <linux/mm.h>
73#include <linux/string.h>
74#include <linux/socket.h>
75#include <linux/sockios.h>
76#include <linux/errno.h>
77#include <linux/in.h>
78#include <linux/inet.h>
79#include <linux/netdevice.h>
80#include <linux/proc_fs.h>
81#include <linux/init.h>
82#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070083#include <linux/inetdevice.h>
84#include <linux/igmp.h>
85#include <linux/pkt_sched.h>
86#include <linux/mroute.h>
87#include <linux/netfilter_ipv4.h>
88#include <linux/random.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070089#include <linux/rcupdate.h>
90#include <linux/times.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090091#include <linux/slab.h>
Eric Dumazet73f156a2014-06-02 05:26:03 -070092#include <linux/jhash.h>
Herbert Xu352e5122007-11-13 21:34:06 -080093#include <net/dst.h>
Thomas Graf1b7179d2015-07-21 10:43:59 +020094#include <net/dst_metadata.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020095#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070096#include <net/protocol.h>
97#include <net/ip.h>
98#include <net/route.h>
99#include <net/inetpeer.h>
100#include <net/sock.h>
101#include <net/ip_fib.h>
102#include <net/arp.h>
103#include <net/tcp.h>
104#include <net/icmp.h>
105#include <net/xfrm.h>
Roopa Prabhu571e7222015-07-21 10:43:47 +0200106#include <net/lwtunnel.h>
Tom Tucker8d717402006-07-30 20:43:36 -0700107#include <net/netevent.h>
Thomas Graf63f34442007-03-22 11:55:17 -0700108#include <net/rtnetlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109#ifdef CONFIG_SYSCTL
110#include <linux/sysctl.h>
Shan Wei7426a562012-04-18 18:05:46 +0000111#include <linux/kmemleak.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112#endif
David S. Miller6e5714e2011-08-03 20:50:44 -0700113#include <net/secure_seq.h>
Thomas Graf1b7179d2015-07-21 10:43:59 +0200114#include <net/ip_tunnels.h>
David Ahern385add92015-09-29 20:07:13 -0700115#include <net/l3mdev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116
David S. Miller68a5e3d2011-03-11 20:07:33 -0500117#define RT_FL_TOS(oldflp4) \
Julian Anastasovf61759e2011-12-02 11:39:42 +0000118 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120#define RT_GC_TIMEOUT (300*HZ)
121
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122static int ip_rt_max_size;
Stephen Hemminger817bc4d2008-03-22 17:43:59 -0700123static int ip_rt_redirect_number __read_mostly = 9;
124static int ip_rt_redirect_load __read_mostly = HZ / 50;
125static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
126static int ip_rt_error_cost __read_mostly = HZ;
127static int ip_rt_error_burst __read_mostly = 5 * HZ;
Stephen Hemminger817bc4d2008-03-22 17:43:59 -0700128static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
129static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
130static int ip_rt_min_advmss __read_mostly = 256;
Eric Dumazet9f28a2f2011-12-21 15:47:16 -0500131
Xin Longdeed49d2016-02-18 21:21:19 +0800132static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700133/*
134 * Interface to generic destination cache.
135 */
136
137static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
David S. Miller0dbaee32010-12-13 12:52:14 -0800138static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
Steffen Klassertebb762f2011-11-23 02:12:51 +0000139static unsigned int ipv4_mtu(const struct dst_entry *dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
141static void ipv4_link_failure(struct sk_buff *skb);
David S. Miller6700c272012-07-17 03:29:28 -0700142static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
143 struct sk_buff *skb, u32 mtu);
144static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
145 struct sk_buff *skb);
David S. Millercaacf052012-07-31 15:06:50 -0700146static void ipv4_dst_destroy(struct dst_entry *dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147
/* Copy-on-write metrics hook for the IPv4 dst_ops table.
 * IPv4 routes never clone their metrics through this path, so being
 * called at all indicates a bug — warn loudly and return no metrics.
 */
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}
153
David S. Millerf894cbf2012-07-02 21:52:24 -0700154static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
155 struct sk_buff *skb,
156 const void *daddr);
David S. Millerd3aaeb32011-07-18 00:40:17 -0700157
/* Destination-cache operations for IPv4: wires the generic dst layer
 * to the IPv4-specific validation, MTU/advmss computation, PMTU and
 * redirect handling, and neighbour lookup implemented in this file.
 */
static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,	/* should never run, see WARN_ON */
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
};
172
/* Map a TOS class to its skb priority; the ECN'd variant of a class
 * shares the same TC_PRIO_* value via this macro.
 */
#define ECN_OR_COST(class)	TC_PRIO_##class

/* Indexed by (iph->tos >> 1) & 0xF: translates the 4 TOS class bits
 * (odd entries are the same class with ECN set) to traffic-control
 * priorities. Exported for use by other protocol code.
 */
const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194
Eric Dumazet2f970d82006-01-17 02:54:36 -0800195static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
Christoph Lameter3ed66e92014-04-07 15:39:40 -0700196#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
200{
Eric Dumazet29e75252008-01-31 17:05:09 -0800201 if (*pos)
David S. Miller89aef892012-07-17 11:00:09 -0700202 return NULL;
Eric Dumazet29e75252008-01-31 17:05:09 -0800203 return SEQ_START_TOKEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204}
205
206static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
207{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 ++*pos;
David S. Miller89aef892012-07-17 11:00:09 -0700209 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700210}
211
/* seq_file stop: nothing to release — start() takes no locks. */
static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}
215
216static int rt_cache_seq_show(struct seq_file *seq, void *v)
217{
218 if (v == SEQ_START_TOKEN)
219 seq_printf(seq, "%-127s\n",
220 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
221 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
222 "HHUptod\tSpecDst");
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900223 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224}
225
/* seq_file iterator for /proc/net/rt_cache (header-only output). */
static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};
232
/* open() handler for /proc/net/rt_cache: plain stateless seq_file. */
static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}
237
/* file_operations for /proc/net/rt_cache. */
static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
245
246
247static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
248{
249 int cpu;
250
251 if (*pos == 0)
252 return SEQ_START_TOKEN;
253
Rusty Russell0f23174a2008-12-29 12:23:42 +0000254 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255 if (!cpu_possible(cpu))
256 continue;
257 *pos = cpu+1;
Eric Dumazet2f970d82006-01-17 02:54:36 -0800258 return &per_cpu(rt_cache_stat, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259 }
260 return NULL;
261}
262
263static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
264{
265 int cpu;
266
Rusty Russell0f23174a2008-12-29 12:23:42 +0000267 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268 if (!cpu_possible(cpu))
269 continue;
270 *pos = cpu+1;
Eric Dumazet2f970d82006-01-17 02:54:36 -0800271 return &per_cpu(rt_cache_stat, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 }
273 return NULL;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900274
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275}
276
/* seq_file stop: no locks or allocations to undo. */
static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}
281
/* Emit one row of /proc/net/stat/rt_cache: the header for the start
 * token, otherwise the per-CPU counters for the CPU that start()/next()
 * handed us. Several historical counters (in_hit, gc_*, *_hlist_search)
 * no longer exist and are printed as literal zeroes to preserve the
 * user-visible column layout.
 */
static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,

		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}
315
/* seq_file iterator for the per-CPU stats in /proc/net/stat/rt_cache. */
static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};
322
323
/* open() handler for /proc/net/stat/rt_cache. */
static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}
328
/* file_operations for /proc/net/stat/rt_cache. */
static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
336
Patrick McHardyc7066f72011-01-14 13:36:42 +0100337#ifdef CONFIG_IP_ROUTE_CLASSID
Alexey Dobriyana661c412009-11-25 15:40:35 -0800338static int rt_acct_proc_show(struct seq_file *m, void *v)
Pavel Emelyanov78c686e2007-12-05 21:13:48 -0800339{
Alexey Dobriyana661c412009-11-25 15:40:35 -0800340 struct ip_rt_acct *dst, *src;
341 unsigned int i, j;
Pavel Emelyanov78c686e2007-12-05 21:13:48 -0800342
Alexey Dobriyana661c412009-11-25 15:40:35 -0800343 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
344 if (!dst)
345 return -ENOMEM;
Pavel Emelyanov78c686e2007-12-05 21:13:48 -0800346
Alexey Dobriyana661c412009-11-25 15:40:35 -0800347 for_each_possible_cpu(i) {
348 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
349 for (j = 0; j < 256; j++) {
350 dst[j].o_bytes += src[j].o_bytes;
351 dst[j].o_packets += src[j].o_packets;
352 dst[j].i_bytes += src[j].i_bytes;
353 dst[j].i_packets += src[j].i_packets;
Pavel Emelyanov78c686e2007-12-05 21:13:48 -0800354 }
355 }
Alexey Dobriyana661c412009-11-25 15:40:35 -0800356
357 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
358 kfree(dst);
359 return 0;
Pavel Emelyanov78c686e2007-12-05 21:13:48 -0800360}
Alexey Dobriyana661c412009-11-25 15:40:35 -0800361
/* open() handler for /proc/net/rt_acct: one-shot single_open dump. */
static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}
366
/* file_operations for /proc/net/rt_acct (CONFIG_IP_ROUTE_CLASSID only). */
static const struct file_operations rt_acct_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_acct_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
Pavel Emelyanov78c686e2007-12-05 21:13:48 -0800374#endif
Pavel Emelyanov107f1632007-12-05 21:14:28 -0800375
/* Per-netns setup of the routing /proc entries: /proc/net/rt_cache,
 * /proc/net/stat/rt_cache and (with CONFIG_IP_ROUTE_CLASSID)
 * /proc/net/rt_acct. Uses goto-chain cleanup so a failure at any step
 * removes exactly the entries created so far. Returns 0 or -ENOMEM.
 */
static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}
Denis V. Lunev73b38712008-02-28 20:51:18 -0800406
/* Per-netns teardown: removes the entries ip_rt_do_proc_init created. */
static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}
415
/* pernet hooks so each network namespace gets its own /proc entries. */
static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};
420
/* Boot-time registration of the per-netns /proc hooks above. */
static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}
425
Pavel Emelyanov107f1632007-12-05 21:14:28 -0800426#else
/* No-op stub when CONFIG_PROC_FS is disabled. */
static inline int ip_rt_proc_init(void)
{
	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431#endif /* CONFIG_PROC_FS */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900432
Eric Dumazet4331deb2012-07-25 05:11:23 +0000433static inline bool rt_is_expired(const struct rtable *rth)
Denis V. Luneve84f84f2008-07-05 19:04:32 -0700434{
fan.duca4c3fc2013-07-30 08:33:53 +0800435 return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
Denis V. Luneve84f84f2008-07-05 19:04:32 -0700436}
437
/* Invalidate every cached IPv4 route in @net by bumping the namespace's
 * route generation counter; stale entries are detected lazily via
 * rt_is_expired().
 */
void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}
442
David S. Millerf894cbf2012-07-02 21:52:24 -0700443static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
444 struct sk_buff *skb,
445 const void *daddr)
David Miller3769cff2011-07-11 22:44:24 +0000446{
David S. Millerd3aaeb32011-07-18 00:40:17 -0700447 struct net_device *dev = dst->dev;
448 const __be32 *pkey = daddr;
David S. Miller39232972012-01-26 15:22:32 -0500449 const struct rtable *rt;
David Miller3769cff2011-07-11 22:44:24 +0000450 struct neighbour *n;
451
David S. Miller39232972012-01-26 15:22:32 -0500452 rt = (const struct rtable *) dst;
David S. Millera263b302012-07-02 02:02:15 -0700453 if (rt->rt_gateway)
David S. Miller39232972012-01-26 15:22:32 -0500454 pkey = (const __be32 *) &rt->rt_gateway;
David S. Millerf894cbf2012-07-02 21:52:24 -0700455 else if (skb)
456 pkey = &ip_hdr(skb)->daddr;
David S. Millerd3aaeb32011-07-18 00:40:17 -0700457
David S. Miller80703d22012-02-15 17:48:35 -0500458 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
David S. Millerd3aaeb32011-07-18 00:40:17 -0700459 if (n)
460 return n;
David Miller32092ec2011-07-25 00:01:41 +0000461 return neigh_create(&arp_tbl, pkey, dev);
David S. Millerd3aaeb32011-07-18 00:40:17 -0700462}
463
Eric Dumazet04ca6972014-07-26 08:58:10 +0200464#define IP_IDENTS_SZ 2048u
Eric Dumazet04ca6972014-07-26 08:58:10 +0200465
Eric Dumazet355b5902015-05-01 10:37:49 -0700466static atomic_t *ip_idents __read_mostly;
467static u32 *ip_tstamps __read_mostly;
Eric Dumazet04ca6972014-07-26 08:58:10 +0200468
/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
/* Reserve @segs consecutive IP IDs from the generator bucket selected
 * by @hash, returning the first reserved value. Lockless: the per-bucket
 * timestamp is updated with cmpxchg (only one CPU wins the right to add
 * the random perturbation), and the ID counter is advanced with an
 * atomic_cmpxchg retry loop.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = ACCESS_ONCE(*p_tstamp);
	u32 now = (u32)jiffies;
	u32 new, delta = 0;

	/* Bucket idle since a previous jiffy: perturb by a random amount
	 * bounded by the idle time, to hide the true packet count.
	 */
	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = prandom_u32_max(now - old);

	/* Do not use atomic_add_return() as it makes UBSAN unhappy */
	do {
		old = (u32)atomic_read(p_id);
		new = old + delta + segs;
	} while (atomic_cmpxchg(p_id, old, new) != old);

	return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
Eric Dumazet73f156a2014-06-02 05:26:03 -0700493
/* Assign an IP ID to @iph for a packet spanning @segs segments.
 * The generator bucket is chosen by hashing (daddr, saddr,
 * protocol ^ net_hash_mix) with a boot-time random key, so distinct
 * flows use distinct ID sequences.
 */
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	static u32 ip_idents_hashrnd __read_mostly;
	u32 hash, id;

	/* Lazily initialize the hash key exactly once. */
	net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));

	hash = jhash_3words((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol ^ net_hash_mix(net),
			    ip_idents_hashrnd);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700509
Eric Dumazet5abf7f72012-07-17 22:42:13 +0200510static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
David S. Miller4895c772012-07-17 04:19:00 -0700511 const struct iphdr *iph,
512 int oif, u8 tos,
513 u8 prot, u32 mark, int flow_flags)
514{
515 if (sk) {
516 const struct inet_sock *inet = inet_sk(sk);
517
518 oif = sk->sk_bound_dev_if;
519 mark = sk->sk_mark;
520 tos = RT_CONN_FLAGS(sk);
521 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
522 }
523 flowi4_init_output(fl4, oif, mark, tos,
524 RT_SCOPE_UNIVERSE, prot,
525 flow_flags,
526 iph->daddr, iph->saddr, 0, 0);
527}
528
Eric Dumazet5abf7f72012-07-17 22:42:13 +0200529static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
530 const struct sock *sk)
David S. Miller4895c772012-07-17 04:19:00 -0700531{
532 const struct iphdr *iph = ip_hdr(skb);
533 int oif = skb->dev->ifindex;
534 u8 tos = RT_TOS(iph->tos);
535 u8 prot = iph->protocol;
536 u32 mark = skb->mark;
537
538 __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
539}
540
/* Build a flow key purely from connected-socket state (no packet).
 * If strict source routing options are attached to the socket, the
 * first-hop address replaces the destination; the options are read
 * under rcu_read_lock() because they may be swapped concurrently.
 */
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0);
	rcu_read_unlock();
}
558
/* Dispatch flow-key construction: prefer the packet when one is
 * available, otherwise derive everything from the socket.
 */
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (!skb)
		build_sk_flow_key(fl4, sk);
	else
		build_skb_flow_key(fl4, skb, sk);
}
567
/* Release a route after the current RCU grace period, so concurrent
 * readers that still hold a reference stay safe.
 */
static inline void rt_free(struct rtable *rt)
{
	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}
572
573static DEFINE_SPINLOCK(fnhe_lock);
David S. Miller4895c772012-07-17 04:19:00 -0700574
/* Detach and free the input/output routes cached on a next-hop
 * exception. Pointers are cleared with RCU_INIT_POINTER before
 * rt_free() queues the RCU-deferred release, so readers either see
 * NULL or a still-valid route. Caller holds fnhe_lock (hence the
 * plain rcu_dereference is safe — TODO confirm against all callers).
 */
static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		rt_free(rt);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		rt_free(rt);
	}
}
590
/* Pick the stalest exception in @hash for reuse: walk the chain keeping
 * the entry with the oldest fnhe_stamp, then flush its cached routes so
 * the caller can overwrite it in place. Assumes the chain is non-empty
 * (the only caller reaches here with depth > FNHE_RECLAIM_DEPTH).
 */
static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	fnhe_flush_routes(oldest);
	return oldest;
}
604
/* Hash a destination address into an FNHE bucket index, keyed with a
 * lazily-initialized boot-time random value so bucket placement is not
 * predictable from outside.
 */
static inline u32 fnhe_hashfun(__be32 daddr)
{
	static u32 fnhe_hashrnd __read_mostly;
	u32 hval;

	net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
	hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
	return hash_32(hval, FNHE_HASH_SHIFT);
}
614
/* Copy exception state into a cached route: learned PMTU and its expiry
 * always; if the exception carries a redirect gateway, switch the route
 * over to it and flag it as redirected.
 */
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_gateway = fnhe->fnhe_gw;
		rt->rt_uses_gateway = 1;
	}
}
626
/* Record a next-hop exception (learned PMTU and/or redirect gateway)
 * for @daddr on @nh, creating the per-nexthop exception hash on first
 * use. An existing entry for @daddr is updated in place and its cached
 * routes refreshed; otherwise a new entry is inserted (recycling the
 * oldest one once a chain exceeds FNHE_RECLAIM_DEPTH) and all routes
 * cached on the nexthop are marked DST_OBSOLETE_KILL so lookups
 * re-evaluate against the new exception. All mutation happens under
 * fnhe_lock; readers walk the chains under RCU.
 *
 * @gw:      redirect gateway, or 0 to leave the gateway untouched
 * @pmtu:    learned path MTU, or 0 to leave PMTU untouched
 * @expires: absolute jiffies expiry for the PMTU value
 */
static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
				  u32 pmtu, unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	unsigned int i;
	int depth;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	/* Allocate the bucket array lazily on the first exception. */
	hash = rcu_dereference(nh->nh_exceptions);
	if (!hash) {
		hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nh->nh_exceptions, hash);
	}

	hash += hval;

	/* Look for an existing entry for this destination, counting chain
	 * depth as we go so we know when to start recycling.
	 */
	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		/* Update only the fields the caller supplied. */
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			/* max(1UL, ...) keeps expiry nonzero as a "set" flag */
			fnhe->fnhe_expires = max(1UL, expires);
		}
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			/* Publish at the chain head after initialization. */
			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_expires = expires;

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nh->nh_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;
			prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}
710
/* Validate an ICMP redirect carried in @skb against the cached route @rt
 * and, if acceptable, record the advertised gateway in a fib_nh exception
 * for the flow @fl4.  When @kill_route is true the cached route is marked
 * DST_OBSOLETE_KILL so it gets re-validated on next use.  Implausible
 * redirects are ignored (and logged as martians when configured).
 */
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	/* Only the four defined redirect codes are honoured. */
	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	/* A redirect is only valid if it comes from our current gateway
	 * (the ICMP source address).
	 */
	if (rt->rt_gateway != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	/* Reject gateways that cannot possibly be unicast next hops. */
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		/* Non-shared media: the new gateway must be on-link, and
		 * with secure redirects ip_fib_check_default() must also
		 * accept it.
		 */
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
	if (!IS_ERR(n)) {
		if (!(n->nud_state & NUD_VALID)) {
			/* Gateway not resolved yet: kick off resolution
			 * instead of installing an unusable exception.
			 */
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh *nh = &FIB_RES_NH(res);

				update_or_create_fnhe(nh, fl4->daddr, new_gw,
						0, jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}
790
David S. Miller4895c772012-07-17 04:19:00 -0700791static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
792{
793 struct rtable *rt;
794 struct flowi4 fl4;
Michal Kubecekf96ef982013-05-28 08:26:49 +0200795 const struct iphdr *iph = (const struct iphdr *) skb->data;
796 int oif = skb->dev->ifindex;
797 u8 tos = RT_TOS(iph->tos);
798 u8 prot = iph->protocol;
799 u32 mark = skb->mark;
David S. Miller4895c772012-07-17 04:19:00 -0700800
801 rt = (struct rtable *) dst;
802
Michal Kubecekf96ef982013-05-28 08:26:49 +0200803 __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
David S. Millerceb33202012-07-17 11:31:28 -0700804 __ip_do_redirect(rt, skb, &fl4, true);
David S. Miller4895c772012-07-17 04:19:00 -0700805}
806
Linus Torvalds1da177e2005-04-16 15:20:36 -0700807static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
808{
Eric Dumazetee6b9672008-03-05 18:30:47 -0800809 struct rtable *rt = (struct rtable *)dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810 struct dst_entry *ret = dst;
811
812 if (rt) {
Timo Teräsd11a4dc2010-03-18 23:20:20 +0000813 if (dst->obsolete > 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700814 ip_rt_put(rt);
815 ret = NULL;
David S. Miller59436342012-07-10 06:58:42 -0700816 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
817 rt->dst.expires) {
David S. Miller89aef892012-07-17 11:00:09 -0700818 ip_rt_put(rt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819 ret = NULL;
820 }
821 }
822 return ret;
823}
824
825/*
826 * Algorithm:
827 * 1. The first ip_rt_redirect_number redirects are sent
828 * with exponential backoff, then we stop sending them at all,
829 * assuming that the host ignores our redirects.
830 * 2. If we did not see packets requiring redirects
831 * during ip_rt_redirect_silence, we assume that the host
832 * forgot redirected route and start to send redirects again.
833 *
834 * This algorithm is much cheaper and more intelligent than dumb load limiting
835 * in icmp.c.
836 *
837 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
838 * and "frag. need" (breaks PMTU discovery) in icmp.c.
839 */
840
/* Decide whether to send an ICMP redirect back to the sender of @skb,
 * applying per-source-host rate limiting through the inet_peer cache
 * with exponential backoff (see the algorithm comment above).
 */
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	/* Snapshot what we need before dropping the RCU read lock. */
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		/* No peer entry available: send unthrottled. */
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		/* Log once, exactly when the backoff limit is reached. */
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}
905
/* Handle a packet whose input route resolved to an error (rt->dst.error):
 * bump the relevant SNMP counters and, rate-limited per source host via
 * the inet_peer cache, send an ICMP destination-unreachable whose code is
 * derived from the error.  Consumes @skb and always returns 0.
 */
static int ip_error(struct sk_buff *skb)
{
	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
	struct rtable *rt = skb_rtable(skb);
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		/* Not forwarding: only account the error, never reply. */
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	/* Map the route error onto an ICMP unreachable code. */
	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	/* Token-bucket rate limit keyed on the sender's address; without
	 * a peer entry we send unconditionally.
	 */
	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972
/* Record path MTU @mtu for the flow @fl4 on route @rt by creating or
 * updating a fib_nh exception on the nexthop.  No-ops when the MTU metric
 * is locked, when the route's current MTU is already smaller than @mtu,
 * or when the identical value was recorded recently enough.
 */
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	struct fib_result res;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	/* Never raise the MTU from here; only shrink it. */
	if (ipv4_mtu(dst) < mtu)
		return;

	/* Clamp to the administratively configured minimum. */
	if (mtu < ip_rt_min_pmtu)
		mtu = ip_rt_min_pmtu;

	/* Same value and the entry is still fresh: skip the refresh. */
	if (rt->rt_pmtu == mtu &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
		struct fib_nh *nh = &FIB_RES_NH(res);

		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}
1000
David S. Miller4895c772012-07-17 04:19:00 -07001001static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1002 struct sk_buff *skb, u32 mtu)
1003{
1004 struct rtable *rt = (struct rtable *) dst;
1005 struct flowi4 fl4;
1006
1007 ip_rt_build_flow_key(&fl4, sk, skb);
Steffen Klassertd851c122012-10-07 22:47:25 +00001008 __ip_rt_update_pmtu(rt, &fl4, mtu);
David S. Miller4895c772012-07-17 04:19:00 -07001009}
1010
David S. Miller36393392012-06-14 22:21:46 -07001011void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1012 int oif, u32 mark, u8 protocol, int flow_flags)
1013{
David S. Miller4895c772012-07-17 04:19:00 -07001014 const struct iphdr *iph = (const struct iphdr *) skb->data;
David S. Miller36393392012-06-14 22:21:46 -07001015 struct flowi4 fl4;
1016 struct rtable *rt;
1017
Lorenzo Colitti1b3c61d2014-05-13 10:17:34 -07001018 if (!mark)
1019 mark = IP4_REPLY_MARK(net, skb->mark);
1020
David S. Miller4895c772012-07-17 04:19:00 -07001021 __build_flow_key(&fl4, NULL, iph, oif,
1022 RT_TOS(iph->tos), protocol, mark, flow_flags);
David S. Miller36393392012-06-14 22:21:46 -07001023 rt = __ip_route_output_key(net, &fl4);
1024 if (!IS_ERR(rt)) {
David S. Miller4895c772012-07-17 04:19:00 -07001025 __ip_rt_update_pmtu(rt, &fl4, mtu);
David S. Miller36393392012-06-14 22:21:46 -07001026 ip_rt_put(rt);
1027 }
1028}
1029EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
1030
Steffen Klassert9cb3a502013-01-21 01:59:11 +00001031static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
David S. Miller36393392012-06-14 22:21:46 -07001032{
David S. Miller4895c772012-07-17 04:19:00 -07001033 const struct iphdr *iph = (const struct iphdr *) skb->data;
1034 struct flowi4 fl4;
1035 struct rtable *rt;
David S. Miller36393392012-06-14 22:21:46 -07001036
David S. Miller4895c772012-07-17 04:19:00 -07001037 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
Lorenzo Colitti1b3c61d2014-05-13 10:17:34 -07001038
1039 if (!fl4.flowi4_mark)
1040 fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1041
David S. Miller4895c772012-07-17 04:19:00 -07001042 rt = __ip_route_output_key(sock_net(sk), &fl4);
1043 if (!IS_ERR(rt)) {
1044 __ip_rt_update_pmtu(rt, &fl4, mtu);
1045 ip_rt_put(rt);
1046 }
David S. Miller36393392012-06-14 22:21:46 -07001047}
Steffen Klassert9cb3a502013-01-21 01:59:11 +00001048
/* Socket-aware PMTU update: apply @mtu to the route used by @sk and, if
 * the update invalidated the socket's cached dst, replace it with a fresh
 * route.  Falls back to the stateless helper when the socket is owned by
 * user context or has no cached dst.
 */
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;	/* true when rt holds a reference we own */

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		/* Cached dst already invalid: look up a fresh route. */
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);

	/* The PMTU update may have killed the route; re-resolve if so. */
	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
David S. Millerf39925d2011-02-09 22:00:16 -08001101
David S. Millerb42597e2012-07-11 21:25:45 -07001102void ipv4_redirect(struct sk_buff *skb, struct net *net,
1103 int oif, u32 mark, u8 protocol, int flow_flags)
1104{
David S. Miller4895c772012-07-17 04:19:00 -07001105 const struct iphdr *iph = (const struct iphdr *) skb->data;
David S. Millerb42597e2012-07-11 21:25:45 -07001106 struct flowi4 fl4;
1107 struct rtable *rt;
1108
David S. Miller4895c772012-07-17 04:19:00 -07001109 __build_flow_key(&fl4, NULL, iph, oif,
1110 RT_TOS(iph->tos), protocol, mark, flow_flags);
David S. Millerb42597e2012-07-11 21:25:45 -07001111 rt = __ip_route_output_key(net, &fl4);
1112 if (!IS_ERR(rt)) {
David S. Millerceb33202012-07-17 11:31:28 -07001113 __ip_do_redirect(rt, skb, &fl4, false);
David S. Millerb42597e2012-07-11 21:25:45 -07001114 ip_rt_put(rt);
1115 }
1116}
1117EXPORT_SYMBOL_GPL(ipv4_redirect);
1118
1119void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1120{
David S. Miller4895c772012-07-17 04:19:00 -07001121 const struct iphdr *iph = (const struct iphdr *) skb->data;
1122 struct flowi4 fl4;
1123 struct rtable *rt;
David S. Millerb42597e2012-07-11 21:25:45 -07001124
David S. Miller4895c772012-07-17 04:19:00 -07001125 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1126 rt = __ip_route_output_key(sock_net(sk), &fl4);
1127 if (!IS_ERR(rt)) {
David S. Millerceb33202012-07-17 11:31:28 -07001128 __ip_do_redirect(rt, skb, &fl4, false);
David S. Miller4895c772012-07-17 04:19:00 -07001129 ip_rt_put(rt);
1130 }
David S. Millerb42597e2012-07-11 21:25:45 -07001131}
1132EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1133
David S. Millerefbc3682011-12-01 13:38:59 -05001134static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1135{
1136 struct rtable *rt = (struct rtable *) dst;
1137
David S. Millerceb33202012-07-17 11:31:28 -07001138 /* All IPV4 dsts are created with ->obsolete set to the value
1139 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1140 * into this function always.
1141 *
Timo Teräs387aa652013-05-27 20:46:31 +00001142 * When a PMTU/redirect information update invalidates a route,
1143 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1144 * DST_OBSOLETE_DEAD by dst_free().
David S. Millerceb33202012-07-17 11:31:28 -07001145 */
Timo Teräs387aa652013-05-27 20:46:31 +00001146 if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
David S. Millerefbc3682011-12-01 13:38:59 -05001147 return NULL;
Timo Teräsd11a4dc2010-03-18 23:20:20 +00001148 return dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149}
1150
/* dst_ops->link_failure hook: report host-unreachable to the sender and
 * expire the cached route attached to @skb, if any.
 */
static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt)
		/* Expire immediately so the next lookup re-resolves. */
		dst_set_expires(&rt->dst, 0);
}
1161
Eric W. Biedermanede20592015-10-07 16:48:47 -05001162static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163{
Joe Perches91df42b2012-05-15 14:11:54 +00001164 pr_debug("%s: %pI4 -> %pI4, %s\n",
1165 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1166 skb->dev ? skb->dev->name : "?");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167 kfree_skb(skb);
Dave Jonesc378a9c2011-05-21 07:16:42 +00001168 WARN_ON(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 return 0;
1170}
1171
/*
   We do not cache the source address of the outgoing interface,
   because it is used only by the IP RR, TS and SRR options,
   so it is kept out of the fast path.

   BTW remember: "addr" is allowed to be unaligned
   in IP options!
 */
/* Copy into @addr the source address to report for @rt in IP options
 * (RR/TS/SRR, see the comment above): for output routes the packet's own
 * source, otherwise the FIB preferred source of the reverse lookup, with
 * a device address as fallback.  @addr may be unaligned, hence memcpy.
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct flowi4 fl4;
		struct iphdr *iph;

		iph = ip_hdr(skb);

		/* Re-key the FIB lookup from the packet headers. */
		memset(&fl4, 0, sizeof(fl4));
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;
		fl4.flowi4_tos = RT_TOS(iph->tos);
		fl4.flowi4_oif = rt->dst.dev->ifindex;
		fl4.flowi4_iif = skb->dev->ifindex;
		fl4.flowi4_mark = skb->mark;

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
		else
			/* No matching route: pick a suitable address on
			 * the output device.
			 */
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}
1213
#ifdef CONFIG_IP_ROUTE_CLASSID
/* Merge the low and high 16-bit halves of @tag into rt->dst.tclassid,
 * touching only halves that are still unset.
 */
static void set_class_tag(struct rtable *rt, u32 tag)
{
	u32 *tclassid = &rt->dst.tclassid;

	if (!(*tclassid & 0xFFFF))
		*tclassid |= tag & 0xFFFF;
	if (!(*tclassid & 0xFFFF0000))
		*tclassid |= tag & 0xFFFF0000;
}
#endif
1223
David S. Miller0dbaee32010-12-13 12:52:14 -08001224static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1225{
1226 unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1227
1228 if (advmss == 0) {
1229 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1230 ip_rt_min_advmss);
1231 if (advmss > 65535 - 40)
1232 advmss = 65535 - 40;
1233 }
1234 return advmss;
1235}
1236
/* dst_ops->mtu hook: report the effective MTU for @dst.  Preference
 * order: a still-valid learned PMTU (rt_pmtu), the RTAX_MTU metric, then
 * the device MTU (clamped to 576 for locked-MTU routes via a gateway).
 * The result is capped at IP_MAX_MTU and reduced by any lightweight
 * tunnel encapsulation headroom.
 */
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;

	/* An absent or expired learned PMTU falls back to the metric. */
	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		return mtu;

	mtu = dst->dev->mtu;

	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
1259
David S. Millerf2bb4be2012-07-17 12:20:47 -07001260static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
David S. Miller4895c772012-07-17 04:19:00 -07001261{
Eric Dumazetcaa41522014-09-03 22:21:56 -07001262 struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
David S. Miller4895c772012-07-17 04:19:00 -07001263 struct fib_nh_exception *fnhe;
1264 u32 hval;
1265
David S. Millerf2bb4be2012-07-17 12:20:47 -07001266 if (!hash)
1267 return NULL;
1268
David S. Millerd3a25c92012-07-17 13:23:08 -07001269 hval = fnhe_hashfun(daddr);
David S. Miller4895c772012-07-17 04:19:00 -07001270
1271 for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1272 fnhe = rcu_dereference(fnhe->fnhe_next)) {
David S. Millerf2bb4be2012-07-17 12:20:47 -07001273 if (fnhe->fnhe_daddr == daddr)
1274 return fnhe;
1275 }
1276 return NULL;
1277}
David S. Miller4895c772012-07-17 04:19:00 -07001278
/* Bind the cached route @rt to the fib_nh_exception @fnhe that matches
 * its destination @daddr, applying the exception's redirect/PMTU data to
 * the route.  Returns true when @rt was actually stored in the
 * exception's cache slot, false otherwise (daddr mismatch or a
 * DST_NOCACHE route).
 */
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr)
{
	bool ret = false;

	/* fnhe_lock serializes against all other writers of the entry. */
	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		/* Input and output routes use separate cache slots. */
		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		/* A genid bump invalidates the exception data: wipe it and
		 * flush its cached routes before reusing the entry.
		 */
		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gateway)
			rt->rt_gateway = daddr;

		/* Publish the new route in the exception, freeing any
		 * previous occupant; NOCACHE routes stay uncached.
		 */
		if (!(rt->dst.flags & DST_NOCACHE)) {
			rcu_assign_pointer(*porig, rt);
			if (orig)
				rt_free(orig);
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}
1322
/* Cache @rt in nexthop @nh: input routes go into nh_rth_input, output
 * routes into this CPU's slot of nh_pcpu_rth_output.  Returns true when
 * the route was stored (any previous occupant is freed), false when a
 * concurrent writer won the race and @rt must be treated as uncached.
 */
static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nh->nh_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
	}
	orig = *p;

	/* Lockless publish: only free the old route if our cmpxchg is
	 * the one that actually replaced it.
	 */
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig)
			rt_free(orig);
	} else
		ret = false;

	return ret;
}
1344
/* Per-cpu list of uncached routes: entries are added by
 * rt_add_uncached_list(), removed by ipv4_dst_destroy(), and every CPU's
 * list is walked by rt_flush_dev() on device teardown.
 */
struct uncached_list {
	spinlock_t lock;		/* protects head */
	struct list_head head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
David S. Millercaacf052012-07-31 15:06:50 -07001351
1352static void rt_add_uncached_list(struct rtable *rt)
1353{
Eric Dumazet5055c372015-01-14 15:17:06 -08001354 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1355
1356 rt->rt_uncached_list = ul;
1357
1358 spin_lock_bh(&ul->lock);
1359 list_add_tail(&rt->rt_uncached, &ul->head);
1360 spin_unlock_bh(&ul->lock);
David S. Millercaacf052012-07-31 15:06:50 -07001361}
1362
1363static void ipv4_dst_destroy(struct dst_entry *dst)
1364{
1365 struct rtable *rt = (struct rtable *) dst;
1366
Eric Dumazet78df76a2012-08-24 05:40:47 +00001367 if (!list_empty(&rt->rt_uncached)) {
Eric Dumazet5055c372015-01-14 15:17:06 -08001368 struct uncached_list *ul = rt->rt_uncached_list;
1369
1370 spin_lock_bh(&ul->lock);
David S. Millercaacf052012-07-31 15:06:50 -07001371 list_del(&rt->rt_uncached);
Eric Dumazet5055c372015-01-14 15:17:06 -08001372 spin_unlock_bh(&ul->lock);
David S. Millercaacf052012-07-31 15:06:50 -07001373 }
1374}
1375
/* Detach all uncached routes still referencing @dev, rehoming them to
 * the netns loopback device so @dev's reference count can drop.
 */
void rt_flush_dev(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			/* Swap the route's device reference to loopback. */
			rt->dst.dev = net->loopback_dev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&ul->lock);
	}
}
1396
Eric Dumazet4331deb2012-07-25 05:11:23 +00001397static bool rt_cache_valid(const struct rtable *rt)
David S. Millerd2d68ba2012-07-17 12:58:50 -07001398{
Eric Dumazet4331deb2012-07-25 05:11:23 +00001399 return rt &&
1400 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1401 !rt_is_expired(rt);
David S. Millerd2d68ba2012-07-17 12:58:50 -07001402}
1403
/* Fill in the nexthop-derived fields of a freshly allocated @rt for
 * fib lookup result @res, then try to cache it.
 *
 * @fnhe: nexthop exception to bind the route into, if one matched
 * @fi:   fib_info of the result (NULL for e.g. broadcast/multicast)
 * @type: route type (RTN_*) — used only for class tagging here
 * @itag: classifier tag returned by source validation
 *
 * If caching (in the exception or the per-cpu nexthop slot) fails, the
 * route is marked DST_NOCACHE and parked on the uncached list instead.
 */
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag)
{
	bool cached = false;

	if (fi) {
		struct fib_nh *nh = &FIB_RES_NH(*res);

		/* Use the gateway only when it is directly reachable. */
		if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
			rt->rt_gateway = nh->nh_gw;
			rt->rt_uses_gateway = 1;
		}
		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
#ifdef CONFIG_IP_ROUTE_CLASSID
		rt->dst.tclassid = nh->nh_tclassid;
#endif
		rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr);
		else if (!(rt->dst.flags & DST_NOCACHE))
			cached = rt_cache_route(nh, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			rt->dst.flags |= DST_NOCACHE;
			if (!rt->rt_gateway)
				rt->rt_gateway = daddr;
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}
1448
David Ahern9ab179d2016-04-07 11:10:06 -07001449struct rtable *rt_dst_alloc(struct net_device *dev,
1450 unsigned int flags, u16 type,
1451 bool nopolicy, bool noxfrm, bool will_cache)
David S. Miller0c4dcd52011-02-17 15:42:37 -08001452{
David Ahernd08c4f32015-09-02 13:58:34 -07001453 struct rtable *rt;
1454
1455 rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1456 (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
1457 (nopolicy ? DST_NOPOLICY : 0) |
1458 (noxfrm ? DST_NOXFRM : 0));
1459
1460 if (rt) {
1461 rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1462 rt->rt_flags = flags;
1463 rt->rt_type = type;
1464 rt->rt_is_input = 0;
1465 rt->rt_iif = 0;
1466 rt->rt_pmtu = 0;
1467 rt->rt_gateway = 0;
1468 rt->rt_uses_gateway = 0;
David Ahernb7503e02015-09-02 13:58:35 -07001469 rt->rt_table_id = 0;
David Ahernd08c4f32015-09-02 13:58:34 -07001470 INIT_LIST_HEAD(&rt->rt_uncached);
1471
1472 rt->dst.output = ip_output;
1473 if (flags & RTCF_LOCAL)
1474 rt->dst.input = ip_local_deliver;
1475 }
1476
1477 return rt;
David S. Miller0c4dcd52011-02-17 15:42:37 -08001478}
David Ahern9ab179d2016-04-07 11:10:06 -07001479EXPORT_SYMBOL(rt_dst_alloc);
David S. Miller0c4dcd52011-02-17 15:42:37 -08001480
/* called in rcu_read_lock() section */
/* Build and attach a route for a multicast packet received on @dev.
 * @our is nonzero when this host is a member of the group, in which
 * case the route also carries RTCF_LOCAL.  Returns 0 on success or a
 * negative errno.
 */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct rtable *rth;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	u32 itag = 0;
	int err;

	/* Primary sanity checks. */

	if (!in_dev)
		return -EINVAL;

	/* Multicast/limited-broadcast sources are never legal, and only
	 * real IP packets are routed here.
	 */
	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		/* 0.0.0.0 sources are tolerated only for link-local groups
		 * (e.g. IGMP from hosts without an address yet).
		 */
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto e_err;
	}
	if (our)
		flags |= RTCF_LOCAL;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		goto e_nobufs;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	/* Multicast routes are never used for output directly. */
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input= 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;

e_nobufs:
	return -ENOBUFS;
e_inval:
	return -EINVAL;
e_err:
	return err;
}
1542
1543
/* Account and (rate-limited, optionally) log a packet whose source
 * address is "martian" on this interface.  Per RFC 1812, the only
 * useful hint for the administrator is the MAC header, so dump it.
 */
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, true);
		}
	}
#endif
}
1568
/* Remove the nexthop exception for @daddr (if present) from @nh's
 * exception hash.  Called when a cached exception route has expired.
 *
 * The chain is walked under fnhe_lock; the matching entry is unlinked
 * with an RCU-safe pointer update, its cached routes are flushed, and
 * the entry itself is freed after a grace period via kfree_rcu().
 */
static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nh->nh_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	/* Walk the bucket keeping a pointer to the link we may rewrite. */
	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}
1598
/* called in rcu_read_lock() section */
/* Create (or reuse from cache) the forwarding route for an input
 * packet whose fib lookup result is @res.  On success the dst is
 * attached to @skb and 0 is returned; otherwise a negative errno.
 */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	/* Only cache when the route came from the FIB and needs no
	 * per-packet classifier tag.
	 */
	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		IPCB(skb)->flags |= IPSKB_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * Proxy arp feature have been extended to allow, ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
	if (do_cache) {
		if (fnhe) {
			rth = rcu_dereference(fnhe->fnhe_rth_input);
			/* An expired exception route is torn down and the
			 * lookup falls through to the plain nexthop cache.
			 */
			if (rth && rth->dst.expires &&
			    time_after(jiffies, rth->dst.expires)) {
				ip_del_fnhe(&FIB_RES_NH(*res), daddr);
				fnhe = NULL;
			} else {
				goto rt_cache;
			}
		}

		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);

rt_cache:
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	if (res->table)
		rth->rt_table_id = res->table->tb_id;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
	/* Lightweight tunnel states may interpose on output/input. */
	if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
		rth->dst.lwtstate->orig_output = rth->dst.output;
		rth->dst.output = lwtunnel_output;
	}
	if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
		rth->dst.lwtstate->orig_input = rth->dst.input;
		rth->dst.input = lwtunnel_input;
	}
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
 cleanup:
	return err;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702
#ifdef CONFIG_IP_ROUTE_MULTIPATH

/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses in reverse order.
 */
static int ip_multipath_icmp_hash(struct sk_buff *skb)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	/* Non-first fragments carry no ICMP header to inspect. */
	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto standard_hash;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto standard_hash;

	/* Only error messages embed the offending packet. */
	switch (icmph->type) {
	case ICMP_DEST_UNREACH:
	case ICMP_REDIRECT:
	case ICMP_TIME_EXCEEDED:
	case ICMP_PARAMETERPROB:
		break;
	default:
		goto standard_hash;
	}

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto standard_hash;

	/* Swap the embedded addresses so the error follows the flow
	 * that triggered it.
	 */
	return fib_multipath_hash(inner_iph->daddr, inner_iph->saddr);

standard_hash:
	return fib_multipath_hash(outer_iph->saddr, outer_iph->daddr);
}

#endif /* CONFIG_IP_ROUTE_MULTIPATH */
1744
/* Pick a nexthop for @res (multipath aware) and build the input route
 * via __mkroute_input().  For multipath routes the nexthop is chosen
 * by a flow hash; ICMP packets hash on the embedded (inner) addresses
 * so errors follow the flow that triggered them.
 * NOTE(review): @fl4 is not used in this body — presumably kept for
 * interface symmetry with callers; confirm before removing.
 */
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    const struct flowi4 *fl4,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && res->fi->fib_nhs > 1) {
		int h;

		if (unlikely(ip_hdr(skb)->protocol == IPPROTO_ICMP))
			h = ip_multipath_icmp_hash(skb);
		else
			h = fib_multipath_hash(saddr, daddr);
		fib_select_multipath(res, h);
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
1766
/*
 * NOTE. We drop all the packets that has local source
 * addresses, because every properly looped back packet
 * must have correct destination already attached by output routine.
 *
 * Such approach solves two big problems:
 * 1. Not simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with 100% of guarantee.
 * called with rcu_read_lock()
 */

/* Slow-path input route resolution: sanity-check addresses, do the
 * fib lookup, and dispatch to broadcast / local / forward handling.
 * On success a dst is attached to @skb and 0 is returned; otherwise a
 * negative errno.
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev)
{
	struct fib_result res;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct ip_tunnel_info *tun_info;
	struct flowi4 fl4;
	unsigned int flags = 0;
	u32 itag = 0;
	struct rtable *rth;
	int err = -EINVAL;
	struct net *net = dev_net(dev);
	bool do_cache;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	   by fib_lookup.
	 */

	/* Carry a received tunnel id into the flow key so tunnel-aware
	 * rules can match on it.
	 */
	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
	else
		fl4.flowi4_tun_key.tun_id = 0;
	skb_dst_drop(skb);

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res.fi = NULL;
	res.table = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I even do not know to fix it or not. Waiting for complains :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * and call it once if daddr or/and saddr are loopback addresses
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 *	Now we are ready to route packet.
	 */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_flags = 0;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	err = fib_lookup(net, &fl4, &res, 0);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res.type == RTN_BROADCAST)
		goto brd_input;

	if (res.type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  0, dev, in_dev, &itag);
		if (err < 0)
			goto martian_source;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res.type != RTN_UNICAST)
		goto martian_destination;

	/* Forwarding case. */
	err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source;
	}
	flags |= RTCF_BROADCAST;
	res.type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	/* Try the cached per-nexthop input route before allocating. */
	do_cache = false;
	if (res.fi) {
		if (!itag) {
			rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
			if (rt_cache_valid(rth)) {
				skb_dst_set_noref(skb, &rth->dst);
				err = 0;
				goto out;
			}
			do_cache = true;
		}
	}

	rth = rt_dst_alloc(net->loopback_dev, flags | RTCF_LOCAL, res.type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	/* Local routes must never be used for output directly. */
	rth->dst.output= ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_is_input = 1;
	if (res.table)
		rth->rt_table_id = res.table->tb_id;

	RT_CACHE_STAT_INC(in_slow_tot);
	if (res.type == RTN_UNREACHABLE) {
		rth->dst.input= ip_error;
		rth->dst.error= -err;
		rth->rt_flags &= ~RTCF_LOCAL;
	}
	if (do_cache) {
		if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
			rth->dst.flags |= DST_NOCACHE;
			rt_add_uncached_list(rth);
		}
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res.type = RTN_UNREACHABLE;
	res.fi = NULL;
	res.table = NULL;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}
1962
David S. Millerc6cffba2012-07-26 11:14:38 +00001963int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1964 u8 tos, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965{
Eric Dumazet96d36222010-06-02 19:21:31 +00001966 int res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
Eric Dumazet96d36222010-06-02 19:21:31 +00001968 rcu_read_lock();
1969
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 /* Multicast recognition logic is moved from route cache to here.
1971 The problem was that too many Ethernet cards have broken/missing
1972 hardware multicast filters :-( As result the host on multicasting
1973 network acquires a lot of useless route cache entries, sort of
1974 SDR messages from all the world. Now we try to get rid of them.
1975 Really, provided software IP multicast filter is organized
1976 reasonably (at least, hashed), it does not result in a slowdown
1977 comparing with route cache reject entries.
1978 Note, that multicast routers are not affected, because
1979 route cache entry is created eventually.
1980 */
Joe Perchesf97c1e02007-12-16 13:45:43 -08001981 if (ipv4_is_multicast(daddr)) {
Eric Dumazet96d36222010-06-02 19:21:31 +00001982 struct in_device *in_dev = __in_dev_get_rcu(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983
Eric Dumazet96d36222010-06-02 19:21:31 +00001984 if (in_dev) {
David S. Millerdbdd9a52011-03-10 16:34:38 -08001985 int our = ip_check_mc_rcu(in_dev, daddr, saddr,
1986 ip_hdr(skb)->protocol);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 if (our
1988#ifdef CONFIG_IP_MROUTE
Joe Perches9d4fb272009-11-23 10:41:23 -08001989 ||
1990 (!ipv4_is_local_multicast(daddr) &&
1991 IN_DEV_MFORWARD(in_dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992#endif
Joe Perches9d4fb272009-11-23 10:41:23 -08001993 ) {
Eric Dumazet96d36222010-06-02 19:21:31 +00001994 int res = ip_route_input_mc(skb, daddr, saddr,
1995 tos, dev, our);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 rcu_read_unlock();
Eric Dumazet96d36222010-06-02 19:21:31 +00001997 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 }
1999 }
2000 rcu_read_unlock();
2001 return -EINVAL;
2002 }
David S. Millerc10237e2012-06-27 17:05:06 -07002003 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
Eric Dumazet96d36222010-06-02 19:21:31 +00002004 rcu_read_unlock();
2005 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006}
David S. Millerc6cffba2012-07-26 11:14:38 +00002007EXPORT_SYMBOL(ip_route_input_noref);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00002009/* called with rcu_read_lock() */
/*
 * Build (or fetch from a cache) the output rtable for a FIB lookup result.
 *
 * @res:      FIB lookup result (nexthop info, route type, table) for the flow
 * @fl4:      flow key describing the outgoing packet
 * @orig_oif: output ifindex as originally requested by the caller, before
 *            the resolver rewrote fl4->flowi4_oif
 * @dev_out:  output net_device selected by the caller
 * @flags:    initial RTCF_* flags for the new route
 *
 * Returns the rtable (reference held via rt_dst_alloc()/dst_hold()) or an
 * ERR_PTR() on failure.  Must run under rcu_read_lock(), per the note above.
 */
David S. Miller982721f2011-02-16 21:44:24 -08002010static struct rtable *__mkroute_output(const struct fib_result *res,
David Miller1a00fee2012-07-01 02:02:56 +00002011				       const struct flowi4 *fl4, int orig_oif,
Julian Anastasovf61759e2011-12-02 11:39:42 +00002012				       struct net_device *dev_out,
David S. Miller5ada5522011-02-17 15:29:00 -08002013				       unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014{
David S. Miller982721f2011-02-16 21:44:24 -08002015	struct fib_info *fi = res->fi;
David S. Millerf2bb4be2012-07-17 12:20:47 -07002016	struct fib_nh_exception *fnhe;
David S. Miller5ada5522011-02-17 15:29:00 -08002017	struct in_device *in_dev;
David S. Miller982721f2011-02-16 21:44:24 -08002018	u16 type = res->type;
David S. Miller5ada5522011-02-17 15:29:00 -08002019	struct rtable *rth;
Julian Anastasovc92b9652012-10-08 11:41:19 +00002020	bool do_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021
Thomas Grafd0daebc32012-06-12 00:44:01 +00002022	in_dev = __in_dev_get_rcu(dev_out);
	2023	if (!in_dev)
David S. Miller5ada5522011-02-17 15:29:00 -08002024		return ERR_PTR(-EINVAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025
	/* A loopback source address may only leave through a loopback or
	 * L3 master device, unless route_localnet is enabled on dev_out.
	 */
Thomas Grafd0daebc32012-06-12 00:44:01 +00002026	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
David Ahern5f02ce242016-09-10 12:09:54 -07002027		if (ipv4_is_loopback(fl4->saddr) &&
	2028		    !(dev_out->flags & IFF_LOOPBACK) &&
	2029		    !netif_is_l3_master(dev_out))
Thomas Grafd0daebc32012-06-12 00:44:01 +00002030			return ERR_PTR(-EINVAL);
	2031
	/* Re-classify by destination address: link broadcast and multicast
	 * override the FIB result type; a zeronet destination is invalid.
	 */
David S. Miller68a5e3d2011-03-11 20:07:33 -05002032	if (ipv4_is_lbcast(fl4->daddr))
David S. Miller982721f2011-02-16 21:44:24 -08002033		type = RTN_BROADCAST;
David S. Miller68a5e3d2011-03-11 20:07:33 -05002034	else if (ipv4_is_multicast(fl4->daddr))
David S. Miller982721f2011-02-16 21:44:24 -08002035		type = RTN_MULTICAST;
David S. Miller68a5e3d2011-03-11 20:07:33 -05002036	else if (ipv4_is_zeronet(fl4->daddr))
David S. Miller5ada5522011-02-17 15:29:00 -08002037		return ERR_PTR(-EINVAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038
	2039	if (dev_out->flags & IFF_LOOPBACK)
	2040		flags |= RTCF_LOCAL;
	2041
	/* Decide whether the resulting route may be cached; broadcast and
	 * some multicast/local cases below clear fi or do_cache to opt out.
	 */
Julian Anastasov63617422012-11-22 23:04:14 +02002042	do_cache = true;
David S. Miller982721f2011-02-16 21:44:24 -08002043	if (type == RTN_BROADCAST) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044		flags |= RTCF_BROADCAST | RTCF_LOCAL;
David S. Miller982721f2011-02-16 21:44:24 -08002045		fi = NULL;
	2046	} else if (type == RTN_MULTICAST) {
Eric Dumazetdd28d1a2010-09-29 11:53:50 +00002047		flags |= RTCF_MULTICAST | RTCF_LOCAL;
David S. Miller813b3b52011-04-28 14:48:42 -07002048		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
	2049				     fl4->flowi4_proto))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050			flags &= ~RTCF_LOCAL;
Julian Anastasov63617422012-11-22 23:04:14 +02002051		else
	2052			do_cache = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053		/* If multicast route do not exist use
Eric Dumazetdd28d1a2010-09-29 11:53:50 +00002054		 * default one, but do not gateway in this case.
	2055		 * Yes, it is hack.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056		 */
David S. Miller982721f2011-02-16 21:44:24 -08002057		if (fi && res->prefixlen < 4)
	2058			fi = NULL;
Chris Friesend6d5e992016-04-08 15:21:30 -06002059	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
	2060		   (orig_oif != dev_out->ifindex)) {
	2061		/* For local routes that require a particular output interface
	2062		 * we do not want to cache the result.  Caching the result
	2063		 * causes incorrect behaviour when there are multiple source
	2064		 * addresses on the interface, the end result being that if the
	2065		 * intended recipient is waiting on that interface for the
	2066		 * packet he won't receive it because it will be delivered on
	2067		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
	2068		 * be set to the loopback interface as well.
	2069		 */
	2070		fi = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071	}
	2072
	/* Try the cached route attached to a matching nexthop exception
	 * (PMTU/redirect) first; an expired exception is torn down here.
	 * Otherwise fall back to the per-cpu cached output route.
	 */
David S. Millerf2bb4be2012-07-17 12:20:47 -07002073	fnhe = NULL;
Julian Anastasov63617422012-11-22 23:04:14 +02002074	do_cache &= fi != NULL;
	2075	if (do_cache) {
David S. Millerc5038a82012-07-31 15:02:02 -07002076		struct rtable __rcu **prth;
Julian Anastasovc92b9652012-10-08 11:41:19 +00002077		struct fib_nh *nh = &FIB_RES_NH(*res);
Eric Dumazetd26b3a72012-07-31 05:45:30 +00002078
Julian Anastasovc92b9652012-10-08 11:41:19 +00002079		fnhe = find_exception(nh, fl4->daddr);
Xin Longdeed49d2016-02-18 21:21:19 +08002080		if (fnhe) {
Timo Teräs2ffae992013-06-27 10:27:05 +03002081			prth = &fnhe->fnhe_rth_output;
Xin Longdeed49d2016-02-18 21:21:19 +08002082			rth = rcu_dereference(*prth);
	2083			if (rth && rth->dst.expires &&
	2084			    time_after(jiffies, rth->dst.expires)) {
	2085				ip_del_fnhe(nh, fl4->daddr);
	2086				fnhe = NULL;
	2087			} else {
	2088				goto rt_cache;
Julian Anastasovc92b9652012-10-08 11:41:19 +00002089			}
Julian Anastasovc92b9652012-10-08 11:41:19 +00002090		}
Xin Longdeed49d2016-02-18 21:21:19 +08002091
		/* NOTE(review): FLOWI_FLAG_KNOWN_NH without an on-link
		 * gateway nexthop skips the per-cpu cache entirely and
		 * allocates an uncached route below.
		 */
	2092		if (unlikely(fl4->flowi4_flags &
	2093			     FLOWI_FLAG_KNOWN_NH &&
	2094			     !(nh->nh_gw &&
	2095			       nh->nh_scope == RT_SCOPE_LINK))) {
	2096			do_cache = false;
	2097			goto add;
	2098		}
	2099		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
David S. Millerc5038a82012-07-31 15:02:02 -07002100		rth = rcu_dereference(*prth);
Xin Longdeed49d2016-02-18 21:21:19 +08002101
	2102rt_cache:
		/* Reached with rth from either the exception or per-cpu slot. */
David S. Millerc5038a82012-07-31 15:02:02 -07002103		if (rt_cache_valid(rth)) {
	2104			dst_hold(&rth->dst);
	2105			return rth;
David S. Millerf2bb4be2012-07-17 12:20:47 -07002106		}
	2107	}
Julian Anastasovc92b9652012-10-08 11:41:19 +00002108
	2109add:
	/* No usable cached route: allocate and populate a fresh one. */
David Ahernd08c4f32015-09-02 13:58:34 -07002110	rth = rt_dst_alloc(dev_out, flags, type,
David S. Miller5c1e6aa2011-04-28 14:13:38 -07002111			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
David S. Millerf2bb4be2012-07-17 12:20:47 -07002112			   IN_DEV_CONF_GET(in_dev, NOXFRM),
Julian Anastasovc92b9652012-10-08 11:41:19 +00002113			   do_cache);
Dimitris Michailidis8391d072010-10-07 14:48:38 +00002114	if (!rth)
David S. Miller5ada5522011-02-17 15:29:00 -08002115		return ERR_PTR(-ENOBUFS);
Dimitris Michailidis8391d072010-10-07 14:48:38 +00002116
David S. Miller13378ca2012-07-23 13:57:45 -07002117	rth->rt_iif	= orig_oif ? : 0;
David Ahernb7503e02015-09-02 13:58:35 -07002118	if (res->table)
	2119		rth->rt_table_id = res->table->tb_id;
	2120
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121	RT_CACHE_STAT_INC(out_slow_tot);
	2122
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002124		if (flags & RTCF_LOCAL &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125		    !(dev_out->flags & IFF_LOOPBACK)) {
Changli Gaod8d1f302010-06-10 23:31:35 -07002126			rth->dst.output = ip_mc_output;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127			RT_CACHE_STAT_INC(out_slow_mc);
	2128		}
	2129#ifdef CONFIG_IP_MROUTE
David S. Miller982721f2011-02-16 21:44:24 -08002130		if (type == RTN_MULTICAST) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131			if (IN_DEV_MFORWARD(in_dev) &&
David S. Miller813b3b52011-04-28 14:48:42 -07002132			    !ipv4_is_local_multicast(fl4->daddr)) {
Changli Gaod8d1f302010-06-10 23:31:35 -07002133				rth->dst.input = ip_mr_input;
	2134				rth->dst.output = ip_mc_output;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135			}
	2136		}
	2137#endif
	2138	}
	2139
David S. Millerf2bb4be2012-07-17 12:20:47 -07002140	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
	/* Lightweight tunnel state may override the output handler. */
Jiri Benc61adedf2015-08-20 13:56:25 +02002141	if (lwtunnel_output_redirect(rth->dst.lwtstate))
Robert Shearman0335f5b2015-08-03 17:39:21 +01002142		rth->dst.output = lwtunnel_output;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143
David S. Miller5ada5522011-02-17 15:29:00 -08002144	return rth;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145}
2146
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147/*
	2148 * Major route resolver routine.
	2149 */
	2150
/*
 * Resolve an output route for the flow @fl4 in namespace @net.
 *
 * Validates/derives the source address, selects the output device, runs
 * the FIB lookup and multipath selection, then builds the rtable via
 * __mkroute_output().  On success fl4's oif/saddr/daddr fields may have
 * been rewritten to the resolved values.
 *
 * @mp_hash: multipath hash passed through to fib_select_path().
 *
 * Returns the rtable or an ERR_PTR() on failure.
 */
Peter Nørlund79a13152015-09-30 10:12:22 +02002151struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
	2152					  int mp_hash)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154	struct net_device *dev_out = NULL;
Julian Anastasovf61759e2011-12-02 11:39:42 +00002155	__u8 tos = RT_FL_TOS(fl4);
David S. Miller813b3b52011-04-28 14:48:42 -07002156	unsigned int flags = 0;
	2157	struct fib_result res;
David S. Miller5ada5522011-02-17 15:29:00 -08002158	struct rtable *rth;
David S. Miller813b3b52011-04-28 14:48:42 -07002159	int orig_oif;
Nikola ForrĂ³0315e382015-09-17 16:01:32 +02002160	int err = -ENETUNREACH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161
David S. Miller85b91b02012-07-13 08:21:29 -07002162	res.tclassid	= 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163	res.fi		= NULL;
David S. Miller8b96d222012-06-11 02:01:56 -07002164	res.table	= NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165
	/* Remember the caller's oif; __mkroute_output() needs the original. */
David S. Miller813b3b52011-04-28 14:48:42 -07002166	orig_oif = fl4->flowi4_oif;
	2167
Pavel Emelyanov1fb94892012-08-08 21:53:36 +00002168	fl4->flowi4_iif = LOOPBACK_IFINDEX;
David S. Miller813b3b52011-04-28 14:48:42 -07002169	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	2170	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
	2171			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
David S. Miller44713b62011-03-04 21:24:47 -08002172
David S. Miller010c2702011-02-17 15:37:09 -08002173	rcu_read_lock();
David S. Miller813b3b52011-04-28 14:48:42 -07002174	if (fl4->saddr) {
David S. Millerb23dd4f2011-03-02 14:31:35 -08002175		rth = ERR_PTR(-EINVAL);
		/* A caller-supplied source address must not be multicast,
		 * limited broadcast or zeronet.
		 */
David S. Miller813b3b52011-04-28 14:48:42 -07002176		if (ipv4_is_multicast(fl4->saddr) ||
	2177		    ipv4_is_lbcast(fl4->saddr) ||
	2178		    ipv4_is_zeronet(fl4->saddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179			goto out;
	2180
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181		/* I removed check for oif == dev_out->oif here.
	2182		   It was wrong for two reasons:
Denis V. Lunev1ab35272008-01-22 22:04:30 -08002183		   1. ip_dev_find(net, saddr) can return wrong iface, if saddr
	2184		      is assigned to multiple interfaces.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185		   2. Moreover, we are allowed to send packets with saddr
	2186		      of another iface. --ANK
	2187		 */
	2188
David S. Miller813b3b52011-04-28 14:48:42 -07002189		if (fl4->flowi4_oif == 0 &&
	2190		    (ipv4_is_multicast(fl4->daddr) ||
	2191		     ipv4_is_lbcast(fl4->daddr))) {
Julian Anastasova210d012008-10-01 07:28:28 -07002192			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
David S. Miller813b3b52011-04-28 14:48:42 -07002193			dev_out = __ip_dev_find(net, fl4->saddr, false);
Ian Morris51456b22015-04-03 09:17:26 +01002194			if (!dev_out)
Julian Anastasova210d012008-10-01 07:28:28 -07002195				goto out;
	2196
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197			/* Special hack: user can direct multicasts
	2198			   and limited broadcast via necessary interface
	2199			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
	2200			   This hack is not just for fun, it allows
	2201			   vic,vat and friends to work.
	2202			   They bind socket to loopback, set ttl to zero
	2203			   and expect that it will work.
	2204			   From the viewpoint of routing cache they are broken,
	2205			   because we are not allowed to build multicast path
	2206			   with loopback source addr (look, routing cache
	2207			   cannot know, that ttl is zero, so that packet
	2208			   will not leave this host and route is valid).
	2209			   Luckily, this hack is good workaround.
	2210			 */
	2211
David S. Miller813b3b52011-04-28 14:48:42 -07002212			fl4->flowi4_oif = dev_out->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213			goto make_route;
	2214		}
Julian Anastasova210d012008-10-01 07:28:28 -07002215
David S. Miller813b3b52011-04-28 14:48:42 -07002216		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
Julian Anastasova210d012008-10-01 07:28:28 -07002217			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
David S. Miller813b3b52011-04-28 14:48:42 -07002218			if (!__ip_dev_find(net, fl4->saddr, false))
Julian Anastasova210d012008-10-01 07:28:28 -07002219				goto out;
Julian Anastasova210d012008-10-01 07:28:28 -07002220		}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221	}
	2222
	2223
	/* An explicit output interface must exist, be up and have IPv4
	 * configured; derive a source address from it when none was given.
	 */
David S. Miller813b3b52011-04-28 14:48:42 -07002224	if (fl4->flowi4_oif) {
	2225		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
David S. Millerb23dd4f2011-03-02 14:31:35 -08002226		rth = ERR_PTR(-ENODEV);
Ian Morris51456b22015-04-03 09:17:26 +01002227		if (!dev_out)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228			goto out;
Herbert Xue5ed6392005-10-03 14:35:55 -07002229
	2230		/* RACE: Check return value of inet_select_addr instead. */
Eric Dumazetfc75fc82010-12-22 04:39:39 +00002231		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
David S. Millerb23dd4f2011-03-02 14:31:35 -08002232			rth = ERR_PTR(-ENETUNREACH);
Eric Dumazetfc75fc82010-12-22 04:39:39 +00002233			goto out;
	2234		}
David S. Miller813b3b52011-04-28 14:48:42 -07002235		if (ipv4_is_local_multicast(fl4->daddr) ||
Andrew Lunn6a211652015-05-01 16:39:54 +02002236		    ipv4_is_lbcast(fl4->daddr) ||
	2237		    fl4->flowi4_proto == IPPROTO_IGMP) {
David S. Miller813b3b52011-04-28 14:48:42 -07002238			if (!fl4->saddr)
	2239				fl4->saddr = inet_select_addr(dev_out, 0,
	2240							      RT_SCOPE_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241			goto make_route;
	2242		}
Jiri Benc0a7e2262013-10-04 17:04:48 +02002243		if (!fl4->saddr) {
David S. Miller813b3b52011-04-28 14:48:42 -07002244			if (ipv4_is_multicast(fl4->daddr))
	2245				fl4->saddr = inet_select_addr(dev_out, 0,
	2246							      fl4->flowi4_scope);
	2247			else if (!fl4->daddr)
	2248				fl4->saddr = inet_select_addr(dev_out, 0,
	2249							      RT_SCOPE_HOST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250		}
	2251	}
	2252
	/* No destination at all: route the packet to the local host. */
David S. Miller813b3b52011-04-28 14:48:42 -07002253	if (!fl4->daddr) {
	2254		fl4->daddr = fl4->saddr;
	2255		if (!fl4->daddr)
	2256			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
Denis V. Lunevb40afd02008-01-22 22:06:19 -08002257		dev_out = net->loopback_dev;
Pavel Emelyanov1fb94892012-08-08 21:53:36 +00002258		fl4->flowi4_oif = LOOPBACK_IFINDEX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259		res.type = RTN_LOCAL;
	2260		flags |= RTCF_LOCAL;
	2261		goto make_route;
	2262	}
	2263
Nikola ForrĂ³0315e382015-09-17 16:01:32 +02002264	err = fib_lookup(net, fl4, &res, 0);
	2265	if (err) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266		res.fi = NULL;
David S. Miller8b96d222012-06-11 02:01:56 -07002267		res.table = NULL;
David Ahern6104e112016-10-12 13:20:11 -07002268		if (fl4->flowi4_oif &&
	2269		    !netif_index_is_l3_master(net, fl4->flowi4_oif)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270			/* Apparently, routing tables are wrong. Assume,
	2271			   that the destination is on link.
	2272
	2273			   WHY? DW.
	2274			   Because we are allowed to send to iface
	2275			   even if it has NO routes and NO assigned
	2276			   addresses. When oif is specified, routing
	2277			   tables are looked up with only one purpose:
	2278			   to catch if destination is gatewayed, rather than
	2279			   direct. Moreover, if MSG_DONTROUTE is set,
	2280			   we send packet, ignoring both routing tables
	2281			   and ifaddr state. --ANK
	2282
	2283
	2284			   We could make it even if oif is unknown,
	2285			   likely IPv6, but we do not.
	2286			 */
	2287
David S. Miller813b3b52011-04-28 14:48:42 -07002288			if (fl4->saddr == 0)
	2289				fl4->saddr = inet_select_addr(dev_out, 0,
	2290							      RT_SCOPE_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291			res.type = RTN_UNICAST;
	2292			goto make_route;
	2293		}
Nikola ForrĂ³0315e382015-09-17 16:01:32 +02002294		rth = ERR_PTR(err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295		goto out;
	2296	}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
	2298	if (res.type == RTN_LOCAL) {
David S. Miller813b3b52011-04-28 14:48:42 -07002299		if (!fl4->saddr) {
Joel Sing9fc3bbb2011-01-03 20:24:20 +00002300			if (res.fi->fib_prefsrc)
David S. Miller813b3b52011-04-28 14:48:42 -07002301				fl4->saddr = res.fi->fib_prefsrc;
Joel Sing9fc3bbb2011-01-03 20:24:20 +00002302			else
David S. Miller813b3b52011-04-28 14:48:42 -07002303				fl4->saddr = fl4->daddr;
Joel Sing9fc3bbb2011-01-03 20:24:20 +00002304		}
David Ahern5f02ce242016-09-10 12:09:54 -07002305
	2306		/* L3 master device is the loopback for that domain */
	2307		dev_out = l3mdev_master_dev_rcu(dev_out) ? : net->loopback_dev;
David S. Miller813b3b52011-04-28 14:48:42 -07002308		fl4->flowi4_oif = dev_out->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309		flags |= RTCF_LOCAL;
	2310		goto make_route;
	2311	}
	2312
David Ahern3ce58d82015-10-05 08:51:25 -07002313	fib_select_path(net, &res, fl4, mp_hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315	dev_out = FIB_RES_DEV(res);
David S. Miller813b3b52011-04-28 14:48:42 -07002316	fl4->flowi4_oif = dev_out->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317
	2318
	2319make_route:
David Miller1a00fee2012-07-01 02:02:56 +00002320	rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321
David S. Miller010c2702011-02-17 15:37:09 -08002322out:
	2323	rcu_read_unlock();
David S. Millerb23dd4f2011-03-02 14:31:35 -08002324	return rth;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325}
Peter Nørlund79a13152015-09-30 10:12:22 +02002326EXPORT_SYMBOL_GPL(__ip_route_output_key_hash);
Arnaldo Carvalho de Melod8c97a92005-08-09 20:12:12 -07002327
Jianzhao Wangae2688d2010-09-08 14:35:43 -07002328static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2329{
2330 return NULL;
2331}
2332
Steffen Klassertebb762f2011-11-23 02:12:51 +00002333static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
Roland Dreierec831ea2011-01-31 13:16:00 -08002334{
Steffen Klassert618f9bc2011-11-23 02:13:31 +00002335 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2336
2337 return mtu ? : dst->dev->mtu;
Roland Dreierec831ea2011-01-31 13:16:00 -08002338}
2339
/* Blackhole routes deliberately ignore PMTU updates: no-op ->update_pmtu(). */
David S. Miller6700c272012-07-17 03:29:28 -07002340static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
	2341					  struct sk_buff *skb, u32 mtu)
David S. Miller14e50e52007-05-24 18:17:54 -07002342{
	2343}
2344
/* Blackhole routes deliberately ignore ICMP redirects: no-op ->redirect(). */
David S. Miller6700c272012-07-17 03:29:28 -07002345static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
	2346				       struct sk_buff *skb)
David S. Millerb587ee32012-07-12 00:39:24 -07002347{
	2348}
2349
/* Blackhole routes never allocate writable metrics: ->cow_metrics()
 * always returns NULL.
 */
Held Bernhard0972ddb2011-04-24 22:07:32 +00002350static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
	2351					  unsigned long old)
	2352{
	2353	return NULL;
	2354}
2355
/*
 * dst_ops for blackhole routes (see ipv4_blackhole_route() below):
 * every state-changing callback is a no-op stub so the dst can be used
 * safely while discarding traffic.
 */
David S. Miller14e50e52007-05-24 18:17:54 -07002356static struct dst_ops ipv4_dst_blackhole_ops = {
	2357	.family			=	AF_INET,
Jianzhao Wangae2688d2010-09-08 14:35:43 -07002358	.check			=	ipv4_blackhole_dst_check,
Steffen Klassertebb762f2011-11-23 02:12:51 +00002359	.mtu			=	ipv4_blackhole_mtu,
Eric Dumazet214f45c2011-02-18 11:39:01 -08002360	.default_advmss		=	ipv4_default_advmss,
David S. Miller14e50e52007-05-24 18:17:54 -07002361	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
David S. Millerb587ee32012-07-12 00:39:24 -07002362	.redirect		=	ipv4_rt_blackhole_redirect,
Held Bernhard0972ddb2011-04-24 22:07:32 +00002363	.cow_metrics		=	ipv4_rt_blackhole_cow_metrics,
David S. Millerd3aaeb32011-07-18 00:40:17 -07002364	.neigh_lookup		=	ipv4_neigh_lookup,
David S. Miller14e50e52007-05-24 18:17:54 -07002365};
2366
/*
 * Clone @dst_orig into a "blackhole" route: the copy keeps the original
 * route's identifying fields (device, iif, pmtu, gateway, ...) but its
 * input/output handlers discard every packet.  Always releases the
 * caller's reference on @dst_orig.
 *
 * Returns the new dst, or ERR_PTR(-ENOMEM) if allocation failed.
 */
David S. Miller2774c132011-03-01 14:59:04 -08002367struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
David S. Miller14e50e52007-05-24 18:17:54 -07002368{
David S. Miller2774c132011-03-01 14:59:04 -08002369	struct rtable *ort = (struct rtable *) dst_orig;
David S. Millerf5b0a872012-07-19 12:31:33 -07002370	struct rtable *rt;
David S. Miller14e50e52007-05-24 18:17:54 -07002371
David S. Millerf5b0a872012-07-19 12:31:33 -07002372	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
David S. Miller14e50e52007-05-24 18:17:54 -07002373	if (rt) {
Changli Gaod8d1f302010-06-10 23:31:35 -07002374		struct dst_entry *new = &rt->dst;
David S. Miller14e50e52007-05-24 18:17:54 -07002375
		/* Discard all traffic through this route. */
David S. Miller14e50e52007-05-24 18:17:54 -07002376		new->__use = 1;
Herbert Xu352e5122007-11-13 21:34:06 -08002377		new->input = dst_discard;
Eric W. Biedermanede20592015-10-07 16:48:47 -05002378		new->output = dst_discard_out;
David S. Miller14e50e52007-05-24 18:17:54 -07002379
Changli Gaod8d1f302010-06-10 23:31:35 -07002380		new->dev = ort->dst.dev;
David S. Miller14e50e52007-05-24 18:17:54 -07002381		if (new->dev)
	2382			dev_hold(new->dev);
	2383
		/* Copy the identifying route fields from the original. */
David S. Miller9917e1e82012-07-17 14:44:26 -07002384		rt->rt_is_input = ort->rt_is_input;
David S. Miller5e2b61f2011-03-04 21:47:09 -08002385		rt->rt_iif = ort->rt_iif;
David S. Miller59436342012-07-10 06:58:42 -07002386		rt->rt_pmtu = ort->rt_pmtu;
David S. Miller14e50e52007-05-24 18:17:54 -07002387
fan.duca4c3fc2013-07-30 08:33:53 +08002388		rt->rt_genid = rt_genid_ipv4(net);
David S. Miller14e50e52007-05-24 18:17:54 -07002389		rt->rt_flags = ort->rt_flags;
	2390		rt->rt_type = ort->rt_type;
David S. Miller14e50e52007-05-24 18:17:54 -07002391		rt->rt_gateway = ort->rt_gateway;
Julian Anastasov155e8332012-10-08 11:41:18 +00002392		rt->rt_uses_gateway = ort->rt_uses_gateway;
David S. Miller14e50e52007-05-24 18:17:54 -07002393
David S. Millercaacf052012-07-31 15:06:50 -07002394		INIT_LIST_HEAD(&rt->rt_uncached);
David S. Miller14e50e52007-05-24 18:17:54 -07002395		dst_free(new);
	2396	}
	2397
David S. Miller2774c132011-03-01 14:59:04 -08002398	dst_release(dst_orig);
	2399
	2400	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
David S. Miller14e50e52007-05-24 18:17:54 -07002401}
2402
/*
 * Resolve an output route for @flp4 and, when the flow has a protocol
 * set, pass the result through xfrm_lookup_route() so IPsec policy can
 * transform or replace it.  Returns the rtable or an ERR_PTR().
 */
David S. Miller9d6ec932011-03-12 01:12:47 -05002403struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
Eric Dumazet6f9c9612015-09-25 07:39:10 -07002404				    const struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405{
David S. Miller9d6ec932011-03-12 01:12:47 -05002406	struct rtable *rt = __ip_route_output_key(net, flp4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407
David S. Millerb23dd4f2011-03-02 14:31:35 -08002408	if (IS_ERR(rt))
	2409		return rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410
David S. Miller56157872011-05-02 14:37:45 -07002411	if (flp4->flowi4_proto)
Steffen Klassertf92ee612014-09-16 10:08:40 +02002412		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
	2413							flowi4_to_flowi(flp4),
	2414							sk, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415
David S. Millerb23dd4f2011-03-02 14:31:35 -08002416	return rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417}
Arnaldo Carvalho de Melod8c97a92005-08-09 20:12:12 -07002418EXPORT_SYMBOL_GPL(ip_route_output_flow);
2419
/*
 * Fill a netlink RTM_NEWROUTE message describing the route attached to
 * @skb (skb_rtable()) into the reply skb.
 *
 * @net/@dst/@src/@table_id/@fl4: lookup parameters echoed back to userspace
 * @portid/@seq/@event/@flags:    netlink message header fields
 * @nowait: for input multicast routes, whether ipmr_get_route() may block
 *
 * Returns 0 on success, -EMSGSIZE if the reply skb ran out of room.
 */
David Ahernc36ba662015-09-02 13:58:36 -07002420static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002421			struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
David S. Millerf1ce3062012-07-12 10:10:17 -07002422			u32 seq, int event, int nowait, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423{
Eric Dumazet511c3f92009-06-02 05:14:27 +00002424	struct rtable *rt = skb_rtable(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425	struct rtmsg *r;
Thomas Grafbe403ea2006-08-17 18:15:17 -07002426	struct nlmsghdr *nlh;
Steffen Klassert2bc8ca42011-10-11 01:12:02 +00002427	unsigned long expires = 0;
David S. Millerf1850712012-07-10 07:26:01 -07002428	u32 error;
Julian Anastasov521f5492012-07-20 12:02:08 +03002429	u32 metrics[RTAX_MAX];
Thomas Grafbe403ea2006-08-17 18:15:17 -07002430
Eric W. Biederman15e47302012-09-07 20:12:54 +00002431	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
Ian Morris51456b22015-04-03 09:17:26 +01002432	if (!nlh)
Patrick McHardy26932562007-01-31 23:16:40 -08002433		return -EMSGSIZE;
Thomas Grafbe403ea2006-08-17 18:15:17 -07002434
	2435	r = nlmsg_data(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436	r->rtm_family	 = AF_INET;
	2437	r->rtm_dst_len	= 32;
	2438	r->rtm_src_len	= 0;
David Millerd6c0a4f2012-07-01 02:02:59 +00002439	r->rtm_tos	= fl4->flowi4_tos;
David Ahernc36ba662015-09-02 13:58:36 -07002440	r->rtm_table	= table_id;
	2441	if (nla_put_u32(skb, RTA_TABLE, table_id))
David S. Millerf3756b72012-04-01 20:39:02 -04002442		goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443	r->rtm_type	= rt->rt_type;
	2444	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	2445	r->rtm_protocol = RTPROT_UNSPEC;
	2446	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	2447	if (rt->rt_flags & RTCF_NOTIFY)
	2448		r->rtm_flags |= RTM_F_NOTIFY;
Hannes Frederic Sowadf4d9252015-01-23 12:01:26 +01002449	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
	2450		r->rtm_flags |= RTCF_DOREDIRECT;
Thomas Grafbe403ea2006-08-17 18:15:17 -07002451
Jiri Benc930345e2015-03-29 16:59:25 +02002452	if (nla_put_in_addr(skb, RTA_DST, dst))
David S. Millerf3756b72012-04-01 20:39:02 -04002453		goto nla_put_failure;
David Miller1a00fee2012-07-01 02:02:56 +00002454	if (src) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455		r->rtm_src_len = 32;
Jiri Benc930345e2015-03-29 16:59:25 +02002456		if (nla_put_in_addr(skb, RTA_SRC, src))
David S. Millerf3756b72012-04-01 20:39:02 -04002457			goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458	}
David S. Millerf3756b72012-04-01 20:39:02 -04002459	if (rt->dst.dev &&
	2460	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
	2461		goto nla_put_failure;
Patrick McHardyc7066f72011-01-14 13:36:42 +01002462#ifdef CONFIG_IP_ROUTE_CLASSID
David S. Millerf3756b72012-04-01 20:39:02 -04002463	if (rt->dst.tclassid &&
	2464	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
	2465		goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466#endif
David S. Miller41347dc2012-06-28 04:05:27 -07002467	if (!rt_is_input_route(rt) &&
David Millerd6c0a4f2012-07-01 02:02:59 +00002468	    fl4->saddr != src) {
Jiri Benc930345e2015-03-29 16:59:25 +02002469		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
David S. Millerf3756b72012-04-01 20:39:02 -04002470			goto nla_put_failure;
	2471	}
Julian Anastasov155e8332012-10-08 11:41:18 +00002472	if (rt->rt_uses_gateway &&
Jiri Benc930345e2015-03-29 16:59:25 +02002473	    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
David S. Millerf3756b72012-04-01 20:39:02 -04002474		goto nla_put_failure;
Thomas Grafbe403ea2006-08-17 18:15:17 -07002475
	/* Convert the absolute expiry into a remaining-time delta for
	 * userspace; 0 means "no expiry".
	 */
Steffen Klassertee9a8f72012-10-08 00:56:54 +00002476	expires = rt->dst.expires;
	2477	if (expires) {
	2478		unsigned long now = jiffies;
	2479
	2480		if (time_before(now, expires))
	2481			expires -= now;
	2482		else
	2483			expires = 0;
	2484	}
	2485
Julian Anastasov521f5492012-07-20 12:02:08 +03002486	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
Steffen Klassertee9a8f72012-10-08 00:56:54 +00002487	if (rt->rt_pmtu && expires)
Julian Anastasov521f5492012-07-20 12:02:08 +03002488		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	2489	if (rtnetlink_put_metrics(skb, metrics) < 0)
Thomas Grafbe403ea2006-08-17 18:15:17 -07002490		goto nla_put_failure;
	2491
David Millerb4869882012-07-01 02:03:01 +00002492	if (fl4->flowi4_mark &&
stephen hemminger68aaed52012-10-10 08:27:25 +00002493	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
David S. Millerf3756b72012-04-01 20:39:02 -04002494		goto nla_put_failure;
Eric Dumazet963bfee2010-07-20 22:03:14 +00002495
Changli Gaod8d1f302010-06-10 23:31:35 -07002496	error = rt->dst.error;
Thomas Grafbe403ea2006-08-17 18:15:17 -07002497
	/* Input routes report the incoming interface; multicast ones may
	 * need a multicast-routing lookup to resolve it.
	 */
David S. Millerc7537962010-11-11 17:07:48 -08002498	if (rt_is_input_route(rt)) {
Nicolas Dichtel8caaf7b2012-12-04 01:03:07 +00002499#ifdef CONFIG_IP_MROUTE
	2500		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
	2501		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
	2502			int err = ipmr_get_route(net, skb,
	2503						 fl4->saddr, fl4->daddr,
Nikolay Aleksandrov2cf75072016-09-25 23:08:31 +02002504						 r, nowait, portid);
	2505
Nicolas Dichtel8caaf7b2012-12-04 01:03:07 +00002506			if (err <= 0) {
	2507				if (!nowait) {
	2508					if (err == 0)
	2509						return 0;
	2510					goto nla_put_failure;
	2511				} else {
	2512					if (err == -EMSGSIZE)
	2513						goto nla_put_failure;
	2514					error = err;
	2515				}
	2516			}
	2517		} else
	2518#endif
Julian Anastasov91146152014-04-13 18:08:02 +03002519			if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
Nicolas Dichtel8caaf7b2012-12-04 01:03:07 +00002520				goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521	}
	2522
David S. Millerf1850712012-07-10 07:26:01 -07002523	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
Thomas Grafe3703b32006-11-27 09:27:07 -08002524		goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525
Johannes Berg053c0952015-01-16 22:09:00 +01002526	nlmsg_end(skb, nlh);
	2527	return 0;
Thomas Grafbe403ea2006-08-17 18:15:17 -07002528
	2529nla_put_failure:
Patrick McHardy26932562007-01-31 23:16:40 -08002530	nlmsg_cancel(skb, nlh);
	2531	return -EMSGSIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532}
2533
/*
 * RTM_GETROUTE handler: parse the request attributes, perform either an
 * input-route lookup (when RTA_IIF is given, by simulating reception of
 * a dummy packet on that device) or an output-route lookup, and reply
 * with an RTM_NEWROUTE message built by rt_fill_info().
 */
Thomas Graf661d2962013-03-21 07:45:29 +00002534static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002536	struct net *net = sock_net(in_skb->sk);
Thomas Grafd889ce32006-08-17 18:15:44 -07002537	struct rtmsg *rtm;
	2538	struct nlattr *tb[RTA_MAX+1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539	struct rtable *rt = NULL;
David Millerd6c0a4f2012-07-01 02:02:59 +00002540	struct flowi4 fl4;
Al Viro9e12bb22006-09-26 21:25:20 -07002541	__be32 dst = 0;
	2542	__be32 src = 0;
	2543	u32 iif;
Thomas Grafd889ce32006-08-17 18:15:44 -07002544	int err;
Eric Dumazet963bfee2010-07-20 22:03:14 +00002545	int mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546	struct sk_buff *skb;
David Ahernc36ba662015-09-02 13:58:36 -07002547	u32 table_id = RT_TABLE_MAIN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548
Thomas Grafd889ce32006-08-17 18:15:44 -07002549	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	2550	if (err < 0)
	2551		goto errout;
	2552
	2553	rtm = nlmsg_data(nlh);
	2554
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
Ian Morris51456b22015-04-03 09:17:26 +01002556	if (!skb) {
Thomas Grafd889ce32006-08-17 18:15:44 -07002557		err = -ENOBUFS;
	2558		goto errout;
	2559	}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560
	2561	/* Reserve room for dummy headers, this skb can pass
	2562	   through good chunk of routing engine.
	2563	 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07002564	skb_reset_mac_header(skb);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002565	skb_reset_network_header(skb);
Stephen Hemmingerd2c962b2006-04-17 17:27:11 -07002566
	2567	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002568	ip_hdr(skb)->protocol = IPPROTO_ICMP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
	2570
Jiri Benc67b61f62015-03-29 16:59:26 +02002571	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	2572	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
Thomas Grafd889ce32006-08-17 18:15:44 -07002573	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
Eric Dumazet963bfee2010-07-20 22:03:14 +00002574	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575
David Millerd6c0a4f2012-07-01 02:02:59 +00002576	memset(&fl4, 0, sizeof(fl4));
	2577	fl4.daddr = dst;
	2578	fl4.saddr = src;
	2579	fl4.flowi4_tos = rtm->rtm_tos;
	2580	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	2581	fl4.flowi4_mark = mark;
	2582
	/* iif given: simulate receiving the dummy packet on that device
	 * (input path); otherwise resolve an output route for the flow.
	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583	if (iif) {
Thomas Grafd889ce32006-08-17 18:15:44 -07002584		struct net_device *dev;
	2585
Denis V. Lunev19375042008-02-28 20:52:04 -08002586		dev = __dev_get_by_index(net, iif);
Ian Morris51456b22015-04-03 09:17:26 +01002587		if (!dev) {
Thomas Grafd889ce32006-08-17 18:15:44 -07002588			err = -ENODEV;
	2589			goto errout_free;
	2590		}
	2591
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592		skb->protocol	= htons(ETH_P_IP);
	2593		skb->dev	= dev;
Eric Dumazet963bfee2010-07-20 22:03:14 +00002594		skb->mark	= mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595		local_bh_disable();
	2596		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
	2597		local_bh_enable();
Thomas Grafd889ce32006-08-17 18:15:44 -07002598
Eric Dumazet511c3f92009-06-02 05:14:27 +00002599		rt = skb_rtable(skb);
Changli Gaod8d1f302010-06-10 23:31:35 -07002600		if (err == 0 && rt->dst.error)
	2601			err = -rt->dst.error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602	} else {
David S. Miller9d6ec932011-03-12 01:12:47 -05002603		rt = ip_route_output_key(net, &fl4);
David S. Millerb23dd4f2011-03-02 14:31:35 -08002604
	2605		err = 0;
	2606		if (IS_ERR(rt))
	2607			err = PTR_ERR(rt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608	}
Thomas Grafd889ce32006-08-17 18:15:44 -07002609
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610	if (err)
Thomas Grafd889ce32006-08-17 18:15:44 -07002611		goto errout_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612
Changli Gaod8d1f302010-06-10 23:31:35 -07002613	skb_dst_set(skb, &rt->dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614	if (rtm->rtm_flags & RTM_F_NOTIFY)
	2615		rt->rt_flags |= RTCF_NOTIFY;
	2616
David Ahernc36ba662015-09-02 13:58:36 -07002617	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
	2618		table_id = rt->rt_table_id;
	2619
	2620	err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002621			   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
Denis V. Lunev19375042008-02-28 20:52:04 -08002622			   RTM_NEWROUTE, 0, 0);
David S. Miller7b46a642015-01-18 23:36:08 -05002623	if (err < 0)
Thomas Grafd889ce32006-08-17 18:15:44 -07002624		goto errout_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625
Eric W. Biederman15e47302012-09-07 20:12:54 +00002626	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
Thomas Grafd889ce32006-08-17 18:15:44 -07002627errout:
Thomas Graf2942e902006-08-15 00:30:25 -07002628	return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629
Thomas Grafd889ce32006-08-17 18:15:44 -07002630errout_free:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631	kfree_skb(skb);
Thomas Grafd889ce32006-08-17 18:15:44 -07002632	goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633}
2634
/*
 * Multicast configuration changed on @in_dev: flush the routing cache of
 * the device's network namespace so stale multicast routes are dropped.
 */
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}
2639
#ifdef CONFIG_SYSCTL
/* Defaults for the GC tunables exported via the net.ipv4.route table below. */
static int ip_rt_gc_interval __read_mostly = 60 * HZ;	/* "gc_interval", jiffies */
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;	/* "gc_min_interval{,_ms}" */
static int ip_rt_gc_elasticity __read_mostly = 8;	/* "gc_elasticity" */
2644
Joe Perchesfe2c6332013-06-11 23:04:25 -07002645static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07002646 void __user *buffer,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647 size_t *lenp, loff_t *ppos)
2648{
Timo Teräs5aad1de2013-05-27 20:46:33 +00002649 struct net *net = (struct net *)__ctl->extra1;
2650
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651 if (write) {
Timo Teräs5aad1de2013-05-27 20:46:33 +00002652 rt_cache_flush(net);
2653 fnhe_genid_bump(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654 return 0;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002655 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656
2657 return -EINVAL;
2658}
2659
/*
 * Global (not per-netns) net.ipv4.route.* tunables.  Entries stored in
 * jiffies use the *_jiffies / *_ms_jiffies handlers so user space sees
 * seconds or milliseconds respectively.
 */
static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */

		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		/* Same backing variable as gc_min_interval, exposed in ms. */
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }	/* sentinel */
};
Denis V. Lunev39a23e72008-07-05 19:02:33 -07002770
/*
 * Per-netns template for net.ipv4.route.flush; duplicated (and its
 * extra1 pointed at the netns) in sysctl_route_net_init().  Write-only.
 */
static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },	/* sentinel */
};
2780
/*
 * Register the per-netns net.ipv4.route sysctl directory.
 *
 * init_net uses the static template table directly; every other netns
 * gets its own kmemdup()ed copy so extra1 can carry the netns pointer
 * without clobbering other namespaces.  Returns 0 or -ENOMEM.
 */
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			tbl[0].procname = NULL;
	}
	/* Handler retrieves the owning netns from extra1. */
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

err_reg:
	/* Only free a duplicated table, never the static template. */
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}
2808
2809static __net_exit void sysctl_route_net_exit(struct net *net)
2810{
2811 struct ctl_table *tbl;
2812
2813 tbl = net->ipv4.route_hdr->ctl_table_arg;
2814 unregister_net_sysctl_table(net->ipv4.route_hdr);
2815 BUG_ON(tbl == ipv4_route_flush_table);
2816 kfree(tbl);
2817}
2818
/* Hooks the flush sysctl into netns creation/destruction. */
static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif
2824
/*
 * Per-netns init: reset the route and fnhe generation counters and seed
 * the random dev_addr generation id used to invalidate cached routes on
 * device address changes.  Always succeeds.
 */
static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
	return 0;
}
2833
/* No .exit: the generation counters need no teardown. */
static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};
2837
David S. Millerc3426b42012-06-09 16:27:05 -07002838static int __net_init ipv4_inetpeer_init(struct net *net)
2839{
2840 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
2841
2842 if (!bp)
2843 return -ENOMEM;
2844 inet_peer_base_init(bp);
2845 net->ipv4.peers = bp;
2846 return 0;
2847}
2848
2849static void __net_exit ipv4_inetpeer_exit(struct net *net)
2850{
2851 struct inet_peer_base *bp = net->ipv4.peers;
2852
2853 net->ipv4.peers = NULL;
David S. Miller56a6b242012-06-09 16:32:41 -07002854 inetpeer_invalidate_tree(bp);
David S. Millerc3426b42012-06-09 16:27:05 -07002855 kfree(bp);
2856}
2857
/* Per-netns lifetime of the IPv4 inetpeer base. */
static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init = ipv4_inetpeer_init,
	.exit = ipv4_inetpeer_exit,
};
Denis V. Lunev9f5e97e2008-07-05 19:02:59 -07002862
#ifdef CONFIG_IP_ROUTE_CLASSID
/* Per-cpu route classid accounting buckets; allocated in ip_rt_init(). */
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866
/*
 * Boot-time initialization of the IPv4 routing subsystem: IP ident
 * arrays, per-cpu uncached-route lists, dst caches/counters, devinet and
 * FIB init, proc files, xfrm hooks, the RTM_GETROUTE netlink handler and
 * the pernet subsystems.  All early allocations panic on failure since
 * routing cannot work without them.  Always returns 0.
 */
int __init ip_rt_init(void)
{
	int rc = 0;
	int cpu;

	ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	/* Randomize initial IP ID state. */
	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Blackhole dsts share the regular rtable slab cache. */
	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	/* Effectively disable dst GC thresholds; sizing is unrestricted. */
	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	/* Non-fatal: routing works without the proc files. */
	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return rc;
}
2927
#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
/* Early registration of the global net.ipv4.route table for init_net. */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif