/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/* Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *	Ville Nuorvala
 *		Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <trace/events/fib6.h>

#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};

static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
static unsigned int	 ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void		ip6_dst_destroy(struct dst_entry *);
static void		ip6_dst_ifdown(struct dst_entry *,
				       struct net_device *dev, int how);
static int		 ip6_dst_gc(struct dst_ops *ops);

static int		ip6_pkt_discard(struct sk_buff *skb);
static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int		ip6_pkt_prohibit(struct sk_buff *skb);
static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void		ip6_link_failure(struct sk_buff *skb);
static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static int rt6_score_route(struct fib6_info *rt, int oif, int strict);
static size_t rt6_nlmsg_size(struct fib6_info *rt);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
					   struct in6_addr *daddr,
					   struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct fib6_info *rt6_add_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev,
					    unsigned int pref);
static struct fib6_info *rt6_get_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev);
#endif

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

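/* rt6_info dst entries that are not attached to a fib6 node (the
 * "uncached" dsts) are kept on a per-cpu list so that
 * rt6_uncached_list_flush_dev() can re-point them at the loopback
 * device when their underlying device is unregistered.
 */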
void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;
		struct net *net = dev_net(rt->dst.dev);

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
		spin_unlock_bh(&ul->lock);
	}
}

static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev == dev) {
				rt->dst.dev = loopback_dev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}

static inline const void *choose_neigh_daddr(const struct in6_addr *p,
					     struct sk_buff *skb,
					     const void *daddr)
{
	if (!ipv6_addr_any(p))
		return (const void *) p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

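/* Resolve (or create) the neighbour entry for the chosen next hop: the
 * gateway if one is set, otherwise the packet's or caller's destination
 * address.
 */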
struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev,
				   struct sk_buff *skb,
				   const void *daddr)
{
	struct neighbour *n;

	daddr = choose_neigh_daddr(gw, skb, daddr);
	n = __ipv6_neigh_lookup(dev, daddr);
	if (n)
		return n;
	return neigh_create(&nd_tbl, daddr, dev);
}

static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
{
	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

	return ip6_neigh_lookup(&rt->rt6i_gateway, dst->dev, skb, daddr);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(&rt->rt6i_gateway, NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}

static struct dst_ops ip6_dst_ops_template = {
	.family			=	AF_INET6,
	.gc			=	ip6_dst_gc,
	.gc_thresh		=	1024,
	.check			=	ip6_dst_check,
	.default_advmss		=	ip6_default_advmss,
	.mtu			=	ip6_mtu,
	.cow_metrics		=	dst_cow_metrics_generic,
	.destroy		=	ip6_dst_destroy,
	.ifdown			=	ip6_dst_ifdown,
	.negative_advice	=	ip6_negative_advice,
	.link_failure		=	ip6_link_failure,
	.update_pmtu		=	ip6_rt_update_pmtu,
	.redirect		=	rt6_do_redirect,
	.local_out		=	__ip6_local_out,
	.neigh_lookup		=	ip6_dst_neigh_lookup,
	.confirm_neigh		=	ip6_confirm_neigh,
};

static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)
{
}

static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}

static struct dst_ops ip6_dst_blackhole_ops = {
	.family			=	AF_INET6,
	.destroy		=	ip6_dst_destroy,
	.check			=	ip6_dst_check,
	.mtu			=	ip6_blackhole_mtu,
	.default_advmss		=	ip6_default_advmss,
	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
	.redirect		=	ip6_rt_blackhole_redirect,
	.cow_metrics		=	dst_cow_metrics_generic,
	.neigh_lookup		=	ip6_dst_neigh_lookup,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct fib6_info fib6_null_entry_template = {
	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.fib6_protocol  = RTPROT_KERNEL,
	.fib6_metric	= ~(u32)0,
	.fib6_ref	= ATOMIC_INIT(1),
	.fib6_type	= RTN_UNREACHABLE,
	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
};

static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#endif

static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct fib6_info *from = rt->from;
	struct inet6_dev *idev;

	dst_destroy_metrics_generic(dst);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	rt->from = NULL;
	fib6_info_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (idev && idev->dev != loopback_dev) {
		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
		if (loopback_idev) {
			rt->rt6i_idev = loopback_idev;
			in6_dev_put(idev);
		}
	}
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, rt->dst.expires);
	else
		return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (rt->from) {
		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
			fib6_check_expired(rt->from);
	}
	return false;
}

static struct fib6_info *rt6_multipath_select(const struct net *net,
					      struct fib6_info *match,
					      struct flowi6 *fl6, int oif,
					      const struct sk_buff *skb,
					      int strict)
{
	struct fib6_info *sibling, *next_sibling;

	/* We might have already computed the hash for ICMPv6 errors. In such
	 * case it will always be non-zero. Otherwise now is the time to do it.
	 */
	if (!fl6->mp_hash)
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

	if (fl6->mp_hash <= atomic_read(&match->fib6_nh.nh_upper_bound))
		return match;

	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
				 fib6_siblings) {
		int nh_upper_bound;

		nh_upper_bound = atomic_read(&sibling->fib6_nh.nh_upper_bound);
		if (fl6->mp_hash > nh_upper_bound)
			continue;
		if (rt6_score_route(sibling, oif, strict) < 0)
			break;
		match = sibling;
		break;
	}

	return match;
}

/*
 *	Route lookup. rcu_read_lock() should be held.
 */

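/* Pick the first route on the node whose nexthop device matches @oif
 * (or, with no @oif, a device that owns @saddr), skipping dead nexthops.
 * Falls back to the null entry when a strict interface match is required
 * but not found.
 */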
static inline struct fib6_info *rt6_device_match(struct net *net,
						 struct fib6_info *rt,
						 const struct in6_addr *saddr,
						 int oif,
						 int flags)
{
	struct fib6_info *sprt;

	if (!oif && ipv6_addr_any(saddr) &&
	    !(rt->fib6_nh.nh_flags & RTNH_F_DEAD))
		return rt;

	for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) {
		const struct net_device *dev = sprt->fib6_nh.nh_dev;

		if (sprt->fib6_nh.nh_flags & RTNH_F_DEAD)
			continue;

		if (oif) {
			if (dev->ifindex == oif)
				return sprt;
		} else {
			if (ipv6_chk_addr(net, saddr, dev,
					  flags & RT6_LOOKUP_F_IFACE))
				return sprt;
		}
	}

	if (oif && flags & RT6_LOOKUP_F_IFACE)
		return net->ipv6.fib6_null_entry;

	return rt->fib6_nh.nh_flags & RTNH_F_DEAD ? net->ipv6.fib6_null_entry : rt;
}

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	dev_put(work->dev);
	kfree(work);
}

static void rt6_probe(struct fib6_info *rt)
{
	struct __rt6_probe_work *work;
	const struct in6_addr *nh_gw;
	struct neighbour *neigh;
	struct net_device *dev;

	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!rt || !(rt->fib6_flags & RTF_GATEWAY))
		return;

	nh_gw = &rt->fib6_nh.nh_gw;
	dev = rt->fib6_nh.nh_dev;
	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
	if (neigh) {
		struct inet6_dev *idev;

		if (neigh->nud_state & NUD_VALID)
			goto out;

		idev = __in6_dev_get(dev);
		work = NULL;
		write_lock(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated + idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock(&neigh->lock);
	} else {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (work) {
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = *nh_gw;
		dev_hold(dev);
		work->dev = dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct fib6_info *rt)
{
}
#endif

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static inline int rt6_check_dev(struct fib6_info *rt, int oif)
{
	const struct net_device *dev = rt->fib6_nh.nh_dev;

	if (!oif || dev->ifindex == oif)
		return 2;
	return 0;
}

static inline enum rt6_nud_state rt6_check_neigh(struct fib6_info *rt)
{
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	if (rt->fib6_flags & RTF_NONEXTHOP ||
	    !(rt->fib6_flags & RTF_GATEWAY))
		return RT6_NUD_SUCCEED;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->fib6_nh.nh_dev,
					  &rt->fib6_nh.nh_gw);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}

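/* Score a route for router selection: 2 if the nexthop device matches
 * @oif (or no @oif was given), plus the RFC 4191 router preference bits,
 * or a negative RT6_NUD_FAIL_* value when the strict interface or
 * reachability checks fail.
 */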
static int rt6_score_route(struct fib6_info *rt, int oif, int strict)
{
	int m;

	m = rt6_check_dev(rt, oif);
	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->fib6_flags)) << 2;
#endif
	if (strict & RT6_LOOKUP_F_REACHABLE) {
		int n = rt6_check_neigh(rt);
		if (n < 0)
			return n;
	}
	return m;
}

/* called with rcu_read_lock held */
static inline bool fib6_ignore_linkdown(const struct fib6_info *f6i)
{
	const struct net_device *dev = fib6_info_nh_dev(f6i);
	bool rc = false;

	if (dev) {
		const struct inet6_dev *idev = __in6_dev_get(dev);

		rc = !!idev->cnf.ignore_routes_with_linkdown;
	}

	return rc;
}

static struct fib6_info *find_match(struct fib6_info *rt, int oif, int strict,
				    int *mpri, struct fib6_info *match,
				    bool *do_rr)
{
	int m;
	bool match_do_rr = false;

	if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
		goto out;

	if (fib6_ignore_linkdown(rt) &&
	    rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	if (fib6_check_expired(rt))
		goto out;

	m = rt6_score_route(rt, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(rt);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		match = rt;
	}
out:
	return match;
}

static struct fib6_info *find_rr_leaf(struct fib6_node *fn,
				      struct fib6_info *leaf,
				      struct fib6_info *rr_head,
				      u32 metric, int oif, int strict,
				      bool *do_rr)
{
	struct fib6_info *rt, *match, *cont;
	int mpri = -1;

	match = NULL;
	cont = NULL;
	for (rt = rr_head; rt; rt = rcu_dereference(rt->rt6_next)) {
		if (rt->fib6_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	for (rt = leaf; rt && rt != rr_head;
	     rt = rcu_dereference(rt->rt6_next)) {
		if (rt->fib6_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	if (match || !cont)
		return match;

	for (rt = cont; rt; rt = rcu_dereference(rt->rt6_next))
		match = find_match(rt, oif, strict, &mpri, match, do_rr);

	return match;
}

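/* Select the route to use from fib6 node @fn: find the best-scoring
 * entry among those sharing the metric of fn->rr_ptr, and rotate
 * fn->rr_ptr when find_match() asked for round-robin.
 */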
static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn,
				    int oif, int strict)
{
	struct fib6_info *leaf = rcu_dereference(fn->leaf);
	struct fib6_info *match, *rt0;
	bool do_rr = false;
	int key_plen;

	if (!leaf || leaf == net->ipv6.fib6_null_entry)
		return net->ipv6.fib6_null_entry;

	rt0 = rcu_dereference(fn->rr_ptr);
	if (!rt0)
		rt0 = leaf;

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not point to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->fib6_src.plen)
		key_plen = rt0->fib6_src.plen;
#endif
	if (fn->fn_bit != key_plen)
		return net->ipv6.fib6_null_entry;

	match = find_rr_leaf(fn, leaf, rt0, rt0->fib6_metric, oif, strict,
			     &do_rr);

	if (do_rr) {
		struct fib6_info *next = rcu_dereference(rt0->rt6_next);

		/* no entries matched; do round-robin */
		if (!next || next->fib6_metric != rt0->fib6_metric)
			next = leaf;

		if (next != rt0) {
			spin_lock_bh(&leaf->fib6_table->tb6_lock);
			/* make sure next is not being deleted from the tree */
			if (next->fib6_node)
				rcu_assign_pointer(fn->rr_ptr, next);
			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
		}
	}

	return match ? match : net->ipv6.fib6_null_entry;
}

static bool rt6_is_gw_or_nonexthop(const struct fib6_info *rt)
{
	return (rt->fib6_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
}

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct fib6_info *rt;

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(net, gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(net, rt);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->fib6_flags = RTF_ROUTEINFO |
				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			fib6_clean_expires(rt);
		else
			fib6_set_expires(rt, jiffies + HZ * lifetime);

		fib6_info_release(rt);
	}
	return 0;
}
#endif

/*
 *	Misc support functions
 */

/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(struct fib6_info *rt)
{
	struct net_device *dev = rt->fib6_nh.nh_dev;

	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&rt->fib6_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* last case is netif_is_l3_master(dev) is true in which
		 * case we want dev returned to be dev
		 */
	}

	return dev;
}

static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_UNSPEC]	= 0,
	[RTN_UNICAST]	= 0,
	[RTN_LOCAL]	= 0,
	[RTN_BROADCAST]	= 0,
	[RTN_ANYCAST]	= 0,
	[RTN_MULTICAST]	= 0,
	[RTN_BLACKHOLE]	= -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	= -EACCES,
	[RTN_THROW]	= -EAGAIN,
	[RTN_NAT]	= -EINVAL,
	[RTN_XRESOLVE]	= -EINVAL,
};

static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}

static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
	unsigned short flags = 0;

	if (rt->dst_nocount)
		flags |= DST_NOCOUNT;
	if (rt->dst_nopolicy)
		flags |= DST_NOPOLICY;
	if (rt->dst_host)
		flags |= DST_HOST;

	return flags;
}

static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
{
	rt->dst.error = ip6_rt_type_to_error(ort->fib6_type);

	switch (ort->fib6_type) {
	case RTN_BLACKHOLE:
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;
		break;
	case RTN_PROHIBIT:
		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;
		break;
	case RTN_THROW:
	case RTN_UNREACHABLE:
	default:
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		break;
	}
}

static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
{
	rt->dst.flags |= fib6_info_dst_flags(ort);

	if (ort->fib6_flags & RTF_REJECT) {
		ip6_rt_init_dst_reject(rt, ort);
		return;
	}

	rt->dst.error = 0;
	rt->dst.output = ip6_output;

	if (ort->fib6_type == RTN_LOCAL) {
		rt->dst.input = ip6_input;
	} else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
		rt->dst.input = ip6_mc_input;
	} else {
		rt->dst.input = ip6_forward;
	}

	if (ort->fib6_nh.nh_lwtstate) {
		rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
		lwtunnel_set_redirect(&rt->dst);
	}

	rt->dst.lastuse = jiffies;
}

static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
	fib6_info_hold(from);
	rt->from = from;
	dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
	if (from->fib6_metrics != &dst_default_metrics) {
		rt->dst._metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&from->fib6_metrics->refcnt);
	}
}

static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
{
	struct net_device *dev = fib6_info_nh_dev(ort);

	ip6_rt_init_dst(rt, ort);

	rt->rt6i_dst = ort->fib6_dst;
	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
	rt->rt6i_gateway = ort->fib6_nh.nh_gw;
	rt->rt6i_flags = ort->fib6_flags;
	rt6_set_from(rt, ort);
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = ort->fib6_src;
#endif
	rt->rt6i_prefsrc = ort->fib6_prefsrc;
	rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
}

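/* Walk back up the tree (descending into source subtrees on the way)
 * until a node carrying route info (RTN_RTINFO) is found; returns NULL
 * once the tree root is reached.
 */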
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn, *sn;
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		if (sn && sn != fn)
			fn = fib6_lookup(sn, NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}

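/* Try to take a reference on @rt's dst. If the dst is already being
 * released, substitute (and hold) the null entry when @null_fallback is
 * set, otherwise clear *prt.
 */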
static bool ip6_hold_safe(struct net *net, struct rt6_info **prt,
			  bool null_fallback)
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;
	if (null_fallback) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}

/* called with rcu_lock held */
static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
{
	unsigned short flags = fib6_info_dst_flags(rt);
	struct net_device *dev = rt->fib6_nh.nh_dev;
	struct rt6_info *nrt;

	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
	if (nrt)
		ip6_rt_copy_init(nrt, rt);

	return nrt;
}

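/* Flow-based lookup within one table, used by ip6_route_lookup() and
 * rt6_lookup(): walk the trie under RCU, apply device and multipath
 * selection, backtrack on failure, and return either a cached exception
 * route, a freshly created clone of the fib6_info, or the null entry.
 */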
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct fib6_info *f6i;
	struct fib6_node *fn;
	struct rt6_info *rt;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;

	rcu_read_lock();
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	f6i = rcu_dereference(fn->leaf);
	if (!f6i) {
		f6i = net->ipv6.fib6_null_entry;
	} else {
		f6i = rt6_device_match(net, f6i, &fl6->saddr,
				       fl6->flowi6_oif, flags);
		if (f6i->fib6_nsiblings && fl6->flowi6_oif == 0)
			f6i = rt6_multipath_select(net, f6i, fl6,
						   fl6->flowi6_oif, skb, flags);
	}
	if (f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

	/* Search through exception table */
	rt = rt6_find_cached_rt(f6i, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt, true))
			dst_use_noref(&rt->dst, jiffies);
	} else if (f6i == net->ipv6.fib6_null_entry) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = ip6_create_rt_rcu(f6i);
		if (!rt) {
			rt = net->ipv6.ip6_null_entry;
			dst_hold(&rt->dst);
		}
	}

	rcu_read_unlock();

	trace_fib6_table_lookup(net, rt, table, fl6);

	return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
{
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason,
 * the route is released.
 * Caller must hold dst before calling it.
 */
1134
David Ahern8d1c8022018-04-17 17:33:26 -07001135static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
David Ahern333c4302017-05-21 10:12:04 -06001136 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137{
1138 int err;
Thomas Grafc71099a2006-08-04 23:20:06 -07001139 struct fib6_table *table;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140
David Ahern93c2fb22018-04-18 15:38:59 -07001141 table = rt->fib6_table;
Wei Wang66f5d6c2017-10-06 12:06:10 -07001142 spin_lock_bh(&table->tb6_lock);
David Ahernd4ead6b2018-04-17 17:33:16 -07001143 err = fib6_add(&table->tb6_root, rt, info, extack);
Wei Wang66f5d6c2017-10-06 12:06:10 -07001144 spin_unlock_bh(&table->tb6_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145
1146 return err;
1147}
1148
David Ahern8d1c8022018-04-17 17:33:26 -07001149int ip6_ins_rt(struct net *net, struct fib6_info *rt)
Thomas Graf40e22e82006-08-22 00:00:45 -07001150{
David Ahernafb1d4b52018-04-17 17:33:11 -07001151 struct nl_info info = { .nl_net = net, };
Florian Westphale715b6d2015-01-05 23:57:44 +01001152
David Ahernd4ead6b2018-04-17 17:33:16 -07001153 return __ip6_ins_rt(rt, &info, NULL);
Thomas Graf40e22e82006-08-22 00:00:45 -07001154}
1155
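/* Create an RTF_CACHE clone of @ort bound to a specific (daddr, saddr)
 * pair.  The clone is not owned by the fib6 tree; it backs the exception
 * (PMTU/redirect) and uncached-clone paths below.
 */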
David Ahern8d1c8022018-04-17 17:33:26 -07001156static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
Martin KaFai Lau8b9df262015-05-22 20:55:59 -07001157 const struct in6_addr *daddr,
1158 const struct in6_addr *saddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159{
David Ahern4832c302017-08-17 12:17:20 -07001160 struct net_device *dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161 struct rt6_info *rt;
1162
1163 /*
1164 * Clone the route.
1165 */
1166
David Ahern4832c302017-08-17 12:17:20 -07001167 dev = ip6_rt_get_dev_rcu(ort);
David Ahern93531c62018-04-17 17:33:25 -07001168 rt = ip6_dst_alloc(dev_net(dev), dev, 0);
Martin KaFai Lau83a09ab2015-05-22 20:56:05 -07001169 if (!rt)
1170 return NULL;
1171
1172 ip6_rt_copy_init(rt, ort);
1173 rt->rt6i_flags |= RTF_CACHE;
Martin KaFai Lau83a09ab2015-05-22 20:56:05 -07001174 rt->dst.flags |= DST_HOST;
1175 rt->rt6i_dst.addr = *daddr;
1176 rt->rt6i_dst.plen = 128;
1177
1178 if (!rt6_is_gw_or_nonexthop(ort)) {
David Ahern93c2fb22018-04-18 15:38:59 -07001179 if (ort->fib6_dst.plen != 128 &&
1180 ipv6_addr_equal(&ort->fib6_dst.addr, daddr))
Martin KaFai Lau83a09ab2015-05-22 20:56:05 -07001181 rt->rt6i_flags |= RTF_ANYCAST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182#ifdef CONFIG_IPV6_SUBTREES
Martin KaFai Lau83a09ab2015-05-22 20:56:05 -07001183 if (rt->rt6i_src.plen && saddr) {
1184 rt->rt6i_src.addr = *saddr;
1185 rt->rt6i_src.plen = 128;
Martin KaFai Lau8b9df262015-05-22 20:55:59 -07001186 }
Martin KaFai Lau83a09ab2015-05-22 20:56:05 -07001187#endif
YOSHIFUJI Hideaki95a9a5b2006-03-20 16:55:51 -08001188 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189
YOSHIFUJI Hideaki95a9a5b2006-03-20 16:55:51 -08001190 return rt;
1191}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192
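/* Allocate a dst copy of @rt flagged RTF_PCPU, to be cached in the
 * fib6_info's per-cpu slot by rt6_make_pcpu_route().
 */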
David Ahern8d1c8022018-04-17 17:33:26 -07001193static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt)
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001194{
David Ahern3b6761d2018-04-17 17:33:20 -07001195 unsigned short flags = fib6_info_dst_flags(rt);
David Ahern4832c302017-08-17 12:17:20 -07001196 struct net_device *dev;
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001197 struct rt6_info *pcpu_rt;
1198
David Ahern4832c302017-08-17 12:17:20 -07001199 rcu_read_lock();
1200 dev = ip6_rt_get_dev_rcu(rt);
David Ahern93531c62018-04-17 17:33:25 -07001201 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
David Ahern4832c302017-08-17 12:17:20 -07001202 rcu_read_unlock();
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001203 if (!pcpu_rt)
1204 return NULL;
1205 ip6_rt_copy_init(pcpu_rt, rt);
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001206 pcpu_rt->rt6i_flags |= RTF_PCPU;
1207 return pcpu_rt;
1208}
1209
Wei Wang66f5d6c2017-10-06 12:06:10 -07001210/* It should be called with rcu_read_lock() acquired */
David Ahern8d1c8022018-04-17 17:33:26 -07001211static struct rt6_info *rt6_get_pcpu_route(struct fib6_info *rt)
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001212{
Martin KaFai Laua73e4192015-08-14 11:05:53 -07001213 struct rt6_info *pcpu_rt, **p;
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001214
1215 p = this_cpu_ptr(rt->rt6i_pcpu);
1216 pcpu_rt = *p;
1217
David Ahernd4ead6b2018-04-17 17:33:16 -07001218 if (pcpu_rt)
1219 ip6_hold_safe(NULL, &pcpu_rt, false);
Wei Wangd3843fe2017-10-06 12:06:06 -07001220
Martin KaFai Laua73e4192015-08-14 11:05:53 -07001221 return pcpu_rt;
1222}
1223
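/* Allocate and install the per-cpu copy of @rt for the current CPU.
 * Falls back to ip6_null_entry (with a reference held) if the
 * allocation fails.
 */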
David Ahernafb1d4b52018-04-17 17:33:11 -07001224static struct rt6_info *rt6_make_pcpu_route(struct net *net,
David Ahern8d1c8022018-04-17 17:33:26 -07001225 struct fib6_info *rt)
Martin KaFai Laua73e4192015-08-14 11:05:53 -07001226{
1227 struct rt6_info *pcpu_rt, *prev, **p;
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001228
1229 pcpu_rt = ip6_rt_pcpu_alloc(rt);
1230 if (!pcpu_rt) {
Martin KaFai Lau9c7370a2015-08-14 11:05:54 -07001231 dst_hold(&net->ipv6.ip6_null_entry->dst);
1232 return net->ipv6.ip6_null_entry;
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001233 }
1234
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001235 dst_hold(&pcpu_rt->dst);
Wei Wanga94b9362017-10-06 12:06:04 -07001236 p = this_cpu_ptr(rt->rt6i_pcpu);
1237 prev = cmpxchg(p, NULL, pcpu_rt);
Eric Dumazet951f7882017-10-08 21:07:18 -07001238 BUG_ON(prev);
Wei Wanga94b9362017-10-06 12:06:04 -07001239
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001240 return pcpu_rt;
1241}
1242
Wei Wang35732d02017-10-06 12:05:57 -07001243/* exception hash table implementation
1244 */
1245static DEFINE_SPINLOCK(rt6_exception_lock);
1246
1247/* Remove rt6_ex from hash table and free the memory
1248 * Caller must hold rt6_exception_lock
1249 */
1250static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1251 struct rt6_exception *rt6_ex)
1252{
Colin Ian Kingb2427e62017-10-10 18:01:16 +01001253 struct net *net;
Wei Wang81eb8442017-10-06 12:06:11 -07001254
Wei Wang35732d02017-10-06 12:05:57 -07001255 if (!bucket || !rt6_ex)
1256 return;
Colin Ian Kingb2427e62017-10-10 18:01:16 +01001257
1258 net = dev_net(rt6_ex->rt6i->dst.dev);
Wei Wang35732d02017-10-06 12:05:57 -07001259 hlist_del_rcu(&rt6_ex->hlist);
David Ahern77634cc2018-04-17 17:33:27 -07001260 dst_release(&rt6_ex->rt6i->dst);
Wei Wang35732d02017-10-06 12:05:57 -07001261 kfree_rcu(rt6_ex, rcu);
1262 WARN_ON_ONCE(!bucket->depth);
1263 bucket->depth--;
Wei Wang81eb8442017-10-06 12:06:11 -07001264 net->ipv6.rt6_stats->fib_rt_cache--;
Wei Wang35732d02017-10-06 12:05:57 -07001265}
1266
1267/* Remove oldest rt6_ex in bucket and free the memory
1268 * Caller must hold rt6_exception_lock
1269 */
1270static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
1271{
1272 struct rt6_exception *rt6_ex, *oldest = NULL;
1273
1274 if (!bucket)
1275 return;
1276
1277 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1278 if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
1279 oldest = rt6_ex;
1280 }
1281 rt6_remove_exception(bucket, oldest);
1282}
1283
1284static u32 rt6_exception_hash(const struct in6_addr *dst,
1285 const struct in6_addr *src)
1286{
1287 static u32 seed __read_mostly;
1288 u32 val;
1289
1290 net_get_random_once(&seed, sizeof(seed));
1291 val = jhash(dst, sizeof(*dst), seed);
1292
1293#ifdef CONFIG_IPV6_SUBTREES
1294 if (src)
1295 val = jhash(src, sizeof(*src), val);
1296#endif
1297 return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
1298}
1299
1300/* Helper function to find the cached rt in the hash table
1301 * and update bucket pointer to point to the bucket for this
1302 * (daddr, saddr) pair
1303 * Caller must hold rt6_exception_lock
1304 */
1305static struct rt6_exception *
1306__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
1307 const struct in6_addr *daddr,
1308 const struct in6_addr *saddr)
1309{
1310 struct rt6_exception *rt6_ex;
1311 u32 hval;
1312
1313 if (!(*bucket) || !daddr)
1314 return NULL;
1315
1316 hval = rt6_exception_hash(daddr, saddr);
1317 *bucket += hval;
1318
1319 hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
1320 struct rt6_info *rt6 = rt6_ex->rt6i;
1321 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1322
1323#ifdef CONFIG_IPV6_SUBTREES
1324 if (matched && saddr)
1325 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1326#endif
1327 if (matched)
1328 return rt6_ex;
1329 }
1330 return NULL;
1331}
1332
1333/* Helper function to find the cached rt in the hash table
1334 * and update bucket pointer to point to the bucket for this
1335 * (daddr, saddr) pair
1336 * Caller must hold rcu_read_lock()
1337 */
1338static struct rt6_exception *
1339__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
1340 const struct in6_addr *daddr,
1341 const struct in6_addr *saddr)
1342{
1343 struct rt6_exception *rt6_ex;
1344 u32 hval;
1345
1346 WARN_ON_ONCE(!rcu_read_lock_held());
1347
1348 if (!(*bucket) || !daddr)
1349 return NULL;
1350
1351 hval = rt6_exception_hash(daddr, saddr);
1352 *bucket += hval;
1353
1354 hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
1355 struct rt6_info *rt6 = rt6_ex->rt6i;
1356 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1357
1358#ifdef CONFIG_IPV6_SUBTREES
1359 if (matched && saddr)
1360 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1361#endif
1362 if (matched)
1363 return rt6_ex;
1364 }
1365 return NULL;
1366}
1367
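/* Effective MTU for @rt: the stored path MTU if set, otherwise the
 * nexthop device's MTU, capped at IP6_MAX_MTU and reduced by any
 * lwtunnel encapsulation headroom.
 */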
David Ahern8d1c8022018-04-17 17:33:26 -07001368static unsigned int fib6_mtu(const struct fib6_info *rt)
David Ahernd4ead6b2018-04-17 17:33:16 -07001369{
1370 unsigned int mtu;
1371
David Aherndcd1f572018-04-18 15:39:05 -07001372 if (rt->fib6_pmtu) {
1373 mtu = rt->fib6_pmtu;
1374 } else {
1375 struct net_device *dev = fib6_info_nh_dev(rt);
1376 struct inet6_dev *idev;
1377
1378 rcu_read_lock();
1379 idev = __in6_dev_get(dev);
1380 mtu = idev->cnf.mtu6;
1381 rcu_read_unlock();
1382 }
1383
David Ahernd4ead6b2018-04-17 17:33:16 -07001384 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1385
1386 return mtu - lwtunnel_headroom(rt->fib6_nh.nh_lwtstate, mtu);
1387}
1388
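/* Insert @nrt as a cached exception (e.g. a PMTU or redirect clone) of
 * @ort.  An existing exception for the same (daddr, saddr) pair is
 * replaced, and the oldest entry is evicted once a bucket exceeds
 * FIB6_MAX_DEPTH.  On success the table sernum is bumped so stale
 * cached dsts are invalidated.
 */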
Wei Wang35732d02017-10-06 12:05:57 -07001389static int rt6_insert_exception(struct rt6_info *nrt,
David Ahern8d1c8022018-04-17 17:33:26 -07001390 struct fib6_info *ort)
Wei Wang35732d02017-10-06 12:05:57 -07001391{
David Ahern5e670d82018-04-17 17:33:14 -07001392 struct net *net = dev_net(nrt->dst.dev);
Wei Wang35732d02017-10-06 12:05:57 -07001393 struct rt6_exception_bucket *bucket;
1394 struct in6_addr *src_key = NULL;
1395 struct rt6_exception *rt6_ex;
1396 int err = 0;
1397
Wei Wang35732d02017-10-06 12:05:57 -07001398 spin_lock_bh(&rt6_exception_lock);
1399
1400 if (ort->exception_bucket_flushed) {
1401 err = -EINVAL;
1402 goto out;
1403 }
1404
1405 bucket = rcu_dereference_protected(ort->rt6i_exception_bucket,
1406 lockdep_is_held(&rt6_exception_lock));
1407 if (!bucket) {
1408 bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
1409 GFP_ATOMIC);
1410 if (!bucket) {
1411 err = -ENOMEM;
1412 goto out;
1413 }
1414 rcu_assign_pointer(ort->rt6i_exception_bucket, bucket);
1415 }
1416
1417#ifdef CONFIG_IPV6_SUBTREES
1418 /* rt6i_src.plen != 0 indicates ort is in subtree
1419 * and exception table is indexed by a hash of
1420 * both rt6i_dst and rt6i_src.
1421 * Otherwise, the exception table is indexed by
1422 * a hash of only rt6i_dst.
1423 */
David Ahern93c2fb22018-04-18 15:38:59 -07001424 if (ort->fib6_src.plen)
Wei Wang35732d02017-10-06 12:05:57 -07001425 src_key = &nrt->rt6i_src.addr;
1426#endif
Wei Wang60006a42017-10-06 12:05:58 -07001427
1428 /* Update rt6i_prefsrc as it could be changed
1429 * in rt6_remove_prefsrc()
1430 */
David Ahern93c2fb22018-04-18 15:38:59 -07001431 nrt->rt6i_prefsrc = ort->fib6_prefsrc;
Wei Wangf5bbe7e2017-10-06 12:05:59 -07001432 /* rt6_mtu_change() might lower mtu on ort.
1433 * Only insert this exception route if its mtu
1434 * is less than ort's mtu value.
1435 */
David Ahernd4ead6b2018-04-17 17:33:16 -07001436 if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(ort)) {
Wei Wangf5bbe7e2017-10-06 12:05:59 -07001437 err = -EINVAL;
1438 goto out;
1439 }
Wei Wang60006a42017-10-06 12:05:58 -07001440
Wei Wang35732d02017-10-06 12:05:57 -07001441 rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1442 src_key);
1443 if (rt6_ex)
1444 rt6_remove_exception(bucket, rt6_ex);
1445
1446 rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
1447 if (!rt6_ex) {
1448 err = -ENOMEM;
1449 goto out;
1450 }
1451 rt6_ex->rt6i = nrt;
1452 rt6_ex->stamp = jiffies;
Wei Wang35732d02017-10-06 12:05:57 -07001453 hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1454 bucket->depth++;
Wei Wang81eb8442017-10-06 12:06:11 -07001455 net->ipv6.rt6_stats->fib_rt_cache++;
Wei Wang35732d02017-10-06 12:05:57 -07001456
1457 if (bucket->depth > FIB6_MAX_DEPTH)
1458 rt6_exception_remove_oldest(bucket);
1459
1460out:
1461 spin_unlock_bh(&rt6_exception_lock);
1462
1463 /* Update fn->fn_sernum to invalidate all cached dst */
Paolo Abenib886d5f2017-10-19 16:07:10 +02001464 if (!err) {
David Ahern93c2fb22018-04-18 15:38:59 -07001465 spin_lock_bh(&ort->fib6_table->tb6_lock);
David Ahern7aef6852018-04-17 17:33:10 -07001466 fib6_update_sernum(net, ort);
David Ahern93c2fb22018-04-18 15:38:59 -07001467 spin_unlock_bh(&ort->fib6_table->tb6_lock);
Paolo Abenib886d5f2017-10-19 16:07:10 +02001468 fib6_force_start_gc(net);
1469 }
Wei Wang35732d02017-10-06 12:05:57 -07001470
1471 return err;
1472}
1473
David Ahern8d1c8022018-04-17 17:33:26 -07001474void rt6_flush_exceptions(struct fib6_info *rt)
Wei Wang35732d02017-10-06 12:05:57 -07001475{
1476 struct rt6_exception_bucket *bucket;
1477 struct rt6_exception *rt6_ex;
1478 struct hlist_node *tmp;
1479 int i;
1480
1481 spin_lock_bh(&rt6_exception_lock);
 1482 /* Prevent rt6_insert_exception() from recreating the bucket list */
1483 rt->exception_bucket_flushed = 1;
1484
1485 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1486 lockdep_is_held(&rt6_exception_lock));
1487 if (!bucket)
1488 goto out;
1489
1490 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1491 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
1492 rt6_remove_exception(bucket, rt6_ex);
1493 WARN_ON_ONCE(bucket->depth);
1494 bucket++;
1495 }
1496
1497out:
1498 spin_unlock_bh(&rt6_exception_lock);
1499}
1500
 1501/* Find the cached rt in the exception hash table of the passed-in rt
1502 * Caller has to hold rcu_read_lock()
1503 */
David Ahern8d1c8022018-04-17 17:33:26 -07001504static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
Wei Wang35732d02017-10-06 12:05:57 -07001505 struct in6_addr *daddr,
1506 struct in6_addr *saddr)
1507{
1508 struct rt6_exception_bucket *bucket;
1509 struct in6_addr *src_key = NULL;
1510 struct rt6_exception *rt6_ex;
1511 struct rt6_info *res = NULL;
1512
1513 bucket = rcu_dereference(rt->rt6i_exception_bucket);
1514
1515#ifdef CONFIG_IPV6_SUBTREES
1516 /* rt6i_src.plen != 0 indicates rt is in subtree
1517 * and exception table is indexed by a hash of
1518 * both rt6i_dst and rt6i_src.
1519 * Otherwise, the exception table is indexed by
1520 * a hash of only rt6i_dst.
1521 */
David Ahern93c2fb22018-04-18 15:38:59 -07001522 if (rt->fib6_src.plen)
Wei Wang35732d02017-10-06 12:05:57 -07001523 src_key = saddr;
1524#endif
1525 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1526
1527 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1528 res = rt6_ex->rt6i;
1529
1530 return res;
1531}
1532
1533/* Remove the passed in cached rt from the hash table that contains it */
David Ahern23fb93a2018-04-17 17:33:23 -07001534static int rt6_remove_exception_rt(struct rt6_info *rt)
Wei Wang35732d02017-10-06 12:05:57 -07001535{
Wei Wang35732d02017-10-06 12:05:57 -07001536 struct rt6_exception_bucket *bucket;
David Ahern8d1c8022018-04-17 17:33:26 -07001537 struct fib6_info *from = rt->from;
Wei Wang35732d02017-10-06 12:05:57 -07001538 struct in6_addr *src_key = NULL;
1539 struct rt6_exception *rt6_ex;
1540 int err;
1541
1542 if (!from ||
Colin Ian King442d7132017-10-10 19:10:30 +01001543 !(rt->rt6i_flags & RTF_CACHE))
Wei Wang35732d02017-10-06 12:05:57 -07001544 return -EINVAL;
1545
1546 if (!rcu_access_pointer(from->rt6i_exception_bucket))
1547 return -ENOENT;
1548
1549 spin_lock_bh(&rt6_exception_lock);
1550 bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
1551 lockdep_is_held(&rt6_exception_lock));
1552#ifdef CONFIG_IPV6_SUBTREES
1553 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1554 * and exception table is indexed by a hash of
1555 * both rt6i_dst and rt6i_src.
1556 * Otherwise, the exception table is indexed by
1557 * a hash of only rt6i_dst.
1558 */
David Ahern93c2fb22018-04-18 15:38:59 -07001559 if (from->fib6_src.plen)
Wei Wang35732d02017-10-06 12:05:57 -07001560 src_key = &rt->rt6i_src.addr;
1561#endif
1562 rt6_ex = __rt6_find_exception_spinlock(&bucket,
1563 &rt->rt6i_dst.addr,
1564 src_key);
1565 if (rt6_ex) {
1566 rt6_remove_exception(bucket, rt6_ex);
1567 err = 0;
1568 } else {
1569 err = -ENOENT;
1570 }
1571
1572 spin_unlock_bh(&rt6_exception_lock);
1573 return err;
1574}
1575
 1576/* Find the rt6_ex that contains the passed-in cached rt and
 1577 * refresh its stamp
1578 */
1579static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1580{
Wei Wang35732d02017-10-06 12:05:57 -07001581 struct rt6_exception_bucket *bucket;
David Ahern8d1c8022018-04-17 17:33:26 -07001582 struct fib6_info *from = rt->from;
Wei Wang35732d02017-10-06 12:05:57 -07001583 struct in6_addr *src_key = NULL;
1584 struct rt6_exception *rt6_ex;
1585
1586 if (!from ||
Colin Ian King442d7132017-10-10 19:10:30 +01001587 !(rt->rt6i_flags & RTF_CACHE))
Wei Wang35732d02017-10-06 12:05:57 -07001588 return;
1589
1590 rcu_read_lock();
1591 bucket = rcu_dereference(from->rt6i_exception_bucket);
1592
1593#ifdef CONFIG_IPV6_SUBTREES
1594 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1595 * and exception table is indexed by a hash of
1596 * both rt6i_dst and rt6i_src.
1597 * Otherwise, the exception table is indexed by
1598 * a hash of only rt6i_dst.
1599 */
David Ahern93c2fb22018-04-18 15:38:59 -07001600 if (from->fib6_src.plen)
Wei Wang35732d02017-10-06 12:05:57 -07001601 src_key = &rt->rt6i_src.addr;
1602#endif
1603 rt6_ex = __rt6_find_exception_rcu(&bucket,
1604 &rt->rt6i_dst.addr,
1605 src_key);
1606 if (rt6_ex)
1607 rt6_ex->stamp = jiffies;
1608
1609 rcu_read_unlock();
1610}
1611
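/* Clear rt6i_prefsrc on every cached exception of @rt, used when the
 * preferred source address is removed from the parent route.
 */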
David Ahern8d1c8022018-04-17 17:33:26 -07001612static void rt6_exceptions_remove_prefsrc(struct fib6_info *rt)
Wei Wang60006a42017-10-06 12:05:58 -07001613{
1614 struct rt6_exception_bucket *bucket;
1615 struct rt6_exception *rt6_ex;
1616 int i;
1617
1618 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1619 lockdep_is_held(&rt6_exception_lock));
1620
1621 if (bucket) {
1622 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1623 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1624 rt6_ex->rt6i->rt6i_prefsrc.plen = 0;
1625 }
1626 bucket++;
1627 }
1628 }
1629}
1630
Stefano Brivioe9fa1492018-03-06 11:10:19 +01001631static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1632 struct rt6_info *rt, int mtu)
1633{
1634 /* If the new MTU is lower than the route PMTU, this new MTU will be the
1635 * lowest MTU in the path: always allow updating the route PMTU to
1636 * reflect PMTU decreases.
1637 *
1638 * If the new MTU is higher, and the route PMTU is equal to the local
1639 * MTU, this means the old MTU is the lowest in the path, so allow
1640 * updating it: if other nodes now have lower MTUs, PMTU discovery will
1641 * handle this.
1642 */
1643
1644 if (dst_mtu(&rt->dst) >= mtu)
1645 return true;
1646
1647 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
1648 return true;
1649
1650 return false;
1651}
1652
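/* Propagate an MTU change to all cached exceptions of @rt, subject to
 * the rules in rt6_mtu_change_route_allowed() above.
 */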
1653static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
David Ahern8d1c8022018-04-17 17:33:26 -07001654 struct fib6_info *rt, int mtu)
Wei Wangf5bbe7e2017-10-06 12:05:59 -07001655{
1656 struct rt6_exception_bucket *bucket;
1657 struct rt6_exception *rt6_ex;
1658 int i;
1659
1660 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1661 lockdep_is_held(&rt6_exception_lock));
1662
Stefano Brivioe9fa1492018-03-06 11:10:19 +01001663 if (!bucket)
1664 return;
1665
1666 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1667 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1668 struct rt6_info *entry = rt6_ex->rt6i;
1669
1670 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
David Ahernd4ead6b2018-04-17 17:33:16 -07001671 * route), the metrics of its rt->from have already
Stefano Brivioe9fa1492018-03-06 11:10:19 +01001672 * been updated.
1673 */
David Ahernd4ead6b2018-04-17 17:33:16 -07001674 if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
Stefano Brivioe9fa1492018-03-06 11:10:19 +01001675 rt6_mtu_change_route_allowed(idev, entry, mtu))
David Ahernd4ead6b2018-04-17 17:33:16 -07001676 dst_metric_set(&entry->dst, RTAX_MTU, mtu);
Wei Wangf5bbe7e2017-10-06 12:05:59 -07001677 }
Stefano Brivioe9fa1492018-03-06 11:10:19 +01001678 bucket++;
Wei Wangf5bbe7e2017-10-06 12:05:59 -07001679 }
1680}
1681
Wei Wangb16cb452017-10-06 12:06:00 -07001682#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
1683
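/* Remove every cached gateway exception of @rt whose gateway matches
 * @gateway.
 */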
David Ahern8d1c8022018-04-17 17:33:26 -07001684static void rt6_exceptions_clean_tohost(struct fib6_info *rt,
Wei Wangb16cb452017-10-06 12:06:00 -07001685 struct in6_addr *gateway)
1686{
1687 struct rt6_exception_bucket *bucket;
1688 struct rt6_exception *rt6_ex;
1689 struct hlist_node *tmp;
1690 int i;
1691
1692 if (!rcu_access_pointer(rt->rt6i_exception_bucket))
1693 return;
1694
1695 spin_lock_bh(&rt6_exception_lock);
1696 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1697 lockdep_is_held(&rt6_exception_lock));
1698
1699 if (bucket) {
1700 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1701 hlist_for_each_entry_safe(rt6_ex, tmp,
1702 &bucket->chain, hlist) {
1703 struct rt6_info *entry = rt6_ex->rt6i;
1704
1705 if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
1706 RTF_CACHE_GATEWAY &&
1707 ipv6_addr_equal(gateway,
1708 &entry->rt6i_gateway)) {
1709 rt6_remove_exception(bucket, rt6_ex);
1710 }
1711 }
1712 bucket++;
1713 }
1714 }
1715
1716 spin_unlock_bh(&rt6_exception_lock);
1717}
1718
Wei Wangc757faa2017-10-06 12:06:01 -07001719static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
1720 struct rt6_exception *rt6_ex,
1721 struct fib6_gc_args *gc_args,
1722 unsigned long now)
1723{
1724 struct rt6_info *rt = rt6_ex->rt6i;
1725
Paolo Abeni1859bac2017-10-19 16:07:11 +02001726 /* We prune and obsolete aged-out and non-gateway exceptions even if
 1727 * others still hold references to them, so that on the next
 1728 * dst_check() such references can be dropped.
 1729 * EXPIRES exceptions - e.g. PMTU-generated ones - are pruned when
 1730 * expired, independently of their aging, as per RFC 8201 section 4.
 1731 */
Wei Wang31afeb42018-01-26 11:40:17 -08001732 if (!(rt->rt6i_flags & RTF_EXPIRES)) {
1733 if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
1734 RT6_TRACE("aging clone %p\n", rt);
1735 rt6_remove_exception(bucket, rt6_ex);
1736 return;
1737 }
1738 } else if (time_after(jiffies, rt->dst.expires)) {
1739 RT6_TRACE("purging expired route %p\n", rt);
Wei Wangc757faa2017-10-06 12:06:01 -07001740 rt6_remove_exception(bucket, rt6_ex);
1741 return;
Wei Wang31afeb42018-01-26 11:40:17 -08001742 }
1743
1744 if (rt->rt6i_flags & RTF_GATEWAY) {
Wei Wangc757faa2017-10-06 12:06:01 -07001745 struct neighbour *neigh;
1746 __u8 neigh_flags = 0;
1747
Eric Dumazet1bfa26f2018-03-23 07:56:58 -07001748 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
1749 if (neigh)
Wei Wangc757faa2017-10-06 12:06:01 -07001750 neigh_flags = neigh->flags;
Eric Dumazet1bfa26f2018-03-23 07:56:58 -07001751
Wei Wangc757faa2017-10-06 12:06:01 -07001752 if (!(neigh_flags & NTF_ROUTER)) {
1753 RT6_TRACE("purging route %p via non-router but gateway\n",
1754 rt);
1755 rt6_remove_exception(bucket, rt6_ex);
1756 return;
1757 }
1758 }
Wei Wang31afeb42018-01-26 11:40:17 -08001759
Wei Wangc757faa2017-10-06 12:06:01 -07001760 gc_args->more++;
1761}
1762
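/* Walk all exception buckets of @rt and let rt6_age_examine_exception()
 * prune aged-out or expired entries during fib6 garbage collection.
 */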
David Ahern8d1c8022018-04-17 17:33:26 -07001763void rt6_age_exceptions(struct fib6_info *rt,
Wei Wangc757faa2017-10-06 12:06:01 -07001764 struct fib6_gc_args *gc_args,
1765 unsigned long now)
1766{
1767 struct rt6_exception_bucket *bucket;
1768 struct rt6_exception *rt6_ex;
1769 struct hlist_node *tmp;
1770 int i;
1771
1772 if (!rcu_access_pointer(rt->rt6i_exception_bucket))
1773 return;
1774
Eric Dumazet1bfa26f2018-03-23 07:56:58 -07001775 rcu_read_lock_bh();
1776 spin_lock(&rt6_exception_lock);
Wei Wangc757faa2017-10-06 12:06:01 -07001777 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1778 lockdep_is_held(&rt6_exception_lock));
1779
1780 if (bucket) {
1781 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1782 hlist_for_each_entry_safe(rt6_ex, tmp,
1783 &bucket->chain, hlist) {
1784 rt6_age_examine_exception(bucket, rt6_ex,
1785 gc_args, now);
1786 }
1787 bucket++;
1788 }
1789 }
Eric Dumazet1bfa26f2018-03-23 07:56:58 -07001790 spin_unlock(&rt6_exception_lock);
1791 rcu_read_unlock_bh();
Wei Wangc757faa2017-10-06 12:06:01 -07001792}
1793
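/* Core policy routing lookup: walk the fib6 tree for the best match,
 * then return a cached exception, an uncached RTF_CACHE clone, or a
 * per-cpu copy of the matched route.
 */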
David Ahern9ff74382016-06-13 13:44:19 -07001794struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
David Ahernb75cc8f2018-03-02 08:32:17 -08001795 int oif, struct flowi6 *fl6,
1796 const struct sk_buff *skb, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797{
Martin KaFai Lau367efcb2014-10-20 13:42:45 -07001798 struct fib6_node *fn, *saved_fn;
David Ahern8d1c8022018-04-17 17:33:26 -07001799 struct fib6_info *f6i;
David Ahern23fb93a2018-04-17 17:33:23 -07001800 struct rt6_info *rt;
Thomas Grafc71099a2006-08-04 23:20:06 -07001801 int strict = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802
YOSHIFUJI Hideaki77d16f42006-08-23 17:25:05 -07001803 strict |= flags & RT6_LOOKUP_F_IFACE;
David Ahernd5d32e42016-10-24 12:27:23 -07001804 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
Martin KaFai Lau367efcb2014-10-20 13:42:45 -07001805 if (net->ipv6.devconf_all->forwarding == 0)
1806 strict |= RT6_LOOKUP_F_REACHABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807
Wei Wang66f5d6c2017-10-06 12:06:10 -07001808 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
David S. Miller4c9483b2011-03-12 16:22:43 -05001810 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
Martin KaFai Lau367efcb2014-10-20 13:42:45 -07001811 saved_fn = fn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812
David Ahernca254492015-10-12 11:47:10 -07001813 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1814 oif = 0;
1815
Martin KaFai Laua3c00e42014-10-20 13:42:43 -07001816redo_rt6_select:
David Ahern23fb93a2018-04-17 17:33:23 -07001817 f6i = rt6_select(net, fn, oif, strict);
David Ahern93c2fb22018-04-18 15:38:59 -07001818 if (f6i->fib6_nsiblings)
David Ahern23fb93a2018-04-17 17:33:23 -07001819 f6i = rt6_multipath_select(net, f6i, fl6, oif, skb, strict);
1820 if (f6i == net->ipv6.fib6_null_entry) {
Martin KaFai Laua3c00e42014-10-20 13:42:43 -07001821 fn = fib6_backtrack(fn, &fl6->saddr);
1822 if (fn)
1823 goto redo_rt6_select;
Martin KaFai Lau367efcb2014-10-20 13:42:45 -07001824 else if (strict & RT6_LOOKUP_F_REACHABLE) {
1825 /* also consider unreachable route */
1826 strict &= ~RT6_LOOKUP_F_REACHABLE;
1827 fn = saved_fn;
1828 goto redo_rt6_select;
Martin KaFai Lau367efcb2014-10-20 13:42:45 -07001829 }
Martin KaFai Laua3c00e42014-10-20 13:42:43 -07001830 }
1831
David Ahern23fb93a2018-04-17 17:33:23 -07001832 if (f6i == net->ipv6.fib6_null_entry) {
David Ahern421842e2018-04-17 17:33:18 -07001833 rt = net->ipv6.ip6_null_entry;
Wei Wang66f5d6c2017-10-06 12:06:10 -07001834 rcu_read_unlock();
Wei Wangd3843fe2017-10-06 12:06:06 -07001835 dst_hold(&rt->dst);
Paolo Abenib65f1642017-10-19 09:31:43 +02001836 trace_fib6_table_lookup(net, rt, table, fl6);
Wei Wangd3843fe2017-10-06 12:06:06 -07001837 return rt;
David Ahern23fb93a2018-04-17 17:33:23 -07001838 }
1839
 1840 /* Search through exception table */
1841 rt = rt6_find_cached_rt(f6i, &fl6->daddr, &fl6->saddr);
1842 if (rt) {
David Ahernd4ead6b2018-04-17 17:33:16 -07001843 if (ip6_hold_safe(net, &rt, true))
Wei Wangd3843fe2017-10-06 12:06:06 -07001844 dst_use_noref(&rt->dst, jiffies);
David Ahernd4ead6b2018-04-17 17:33:16 -07001845
Wei Wang66f5d6c2017-10-06 12:06:10 -07001846 rcu_read_unlock();
Paolo Abenib65f1642017-10-19 09:31:43 +02001847 trace_fib6_table_lookup(net, rt, table, fl6);
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001848 return rt;
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07001849 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
David Ahern93c2fb22018-04-18 15:38:59 -07001850 !(f6i->fib6_flags & RTF_GATEWAY))) {
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07001851 /* Create an RTF_CACHE clone which will not be
 1852 * owned by the fib6 tree. It is for the special case where
 1853 * the daddr in the skb during the neighbor look-up is different
 1854 * from the fl6->daddr used to look up the route here.
1855 */
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07001856 struct rt6_info *uncached_rt;
1857
David Ahern23fb93a2018-04-17 17:33:23 -07001858 uncached_rt = ip6_rt_cache_alloc(f6i, &fl6->daddr, NULL);
David Ahern4d85cd02018-04-20 15:37:59 -07001859
1860 rcu_read_unlock();
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07001861
Wei Wang1cfb71e2017-06-17 10:42:33 -07001862 if (uncached_rt) {
1863 /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
1864 * No need for another dst_hold()
1865 */
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -07001866 rt6_uncached_list_add(uncached_rt);
Wei Wang81eb8442017-10-06 12:06:11 -07001867 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
Wei Wang1cfb71e2017-06-17 10:42:33 -07001868 } else {
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07001869 uncached_rt = net->ipv6.ip6_null_entry;
Wei Wang1cfb71e2017-06-17 10:42:33 -07001870 dst_hold(&uncached_rt->dst);
1871 }
David Ahernb8115802015-11-19 12:24:22 -08001872
Paolo Abenib65f1642017-10-19 09:31:43 +02001873 trace_fib6_table_lookup(net, uncached_rt, table, fl6);
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07001874 return uncached_rt;
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07001875
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001876 } else {
1877 /* Get a percpu copy */
1878
1879 struct rt6_info *pcpu_rt;
1880
Eric Dumazet951f7882017-10-08 21:07:18 -07001881 local_bh_disable();
David Ahern23fb93a2018-04-17 17:33:23 -07001882 pcpu_rt = rt6_get_pcpu_route(f6i);
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001883
David Ahern93531c62018-04-17 17:33:25 -07001884 if (!pcpu_rt)
1885 pcpu_rt = rt6_make_pcpu_route(net, f6i);
1886
Eric Dumazet951f7882017-10-08 21:07:18 -07001887 local_bh_enable();
1888 rcu_read_unlock();
Paolo Abenib65f1642017-10-19 09:31:43 +02001889 trace_fib6_table_lookup(net, pcpu_rt, table, fl6);
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001890 return pcpu_rt;
1891 }
Thomas Grafc71099a2006-08-04 23:20:06 -07001892}
David Ahern9ff74382016-06-13 13:44:19 -07001893EXPORT_SYMBOL_GPL(ip6_pol_route);
Thomas Grafc71099a2006-08-04 23:20:06 -07001894
David Ahernb75cc8f2018-03-02 08:32:17 -08001895static struct rt6_info *ip6_pol_route_input(struct net *net,
1896 struct fib6_table *table,
1897 struct flowi6 *fl6,
1898 const struct sk_buff *skb,
1899 int flags)
Pavel Emelyanov4acad722007-10-15 13:02:51 -07001900{
David Ahernb75cc8f2018-03-02 08:32:17 -08001901 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
Pavel Emelyanov4acad722007-10-15 13:02:51 -07001902}
1903
Mahesh Bandeward409b842016-09-16 12:59:08 -07001904struct dst_entry *ip6_route_input_lookup(struct net *net,
1905 struct net_device *dev,
David Ahernb75cc8f2018-03-02 08:32:17 -08001906 struct flowi6 *fl6,
1907 const struct sk_buff *skb,
1908 int flags)
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00001909{
1910 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
1911 flags |= RT6_LOOKUP_F_IFACE;
1912
David Ahernb75cc8f2018-03-02 08:32:17 -08001913 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00001914}
Mahesh Bandeward409b842016-09-16 12:59:08 -07001915EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00001916
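/* Fill in L3 keys for multipath hashing.  For ICMPv6 error messages the
 * inner (offending) packet's addresses are used, so the error follows
 * the same path as the flow that triggered it.
 */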
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001917static void ip6_multipath_l3_keys(const struct sk_buff *skb,
Roopa Prabhu5e5d6fe2018-02-28 22:43:22 -05001918 struct flow_keys *keys,
1919 struct flow_keys *flkeys)
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001920{
1921 const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
1922 const struct ipv6hdr *key_iph = outer_iph;
Roopa Prabhu5e5d6fe2018-02-28 22:43:22 -05001923 struct flow_keys *_flkeys = flkeys;
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001924 const struct ipv6hdr *inner_iph;
1925 const struct icmp6hdr *icmph;
1926 struct ipv6hdr _inner_iph;
1927
1928 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
1929 goto out;
1930
1931 icmph = icmp6_hdr(skb);
1932 if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
1933 icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
1934 icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
1935 icmph->icmp6_type != ICMPV6_PARAMPROB)
1936 goto out;
1937
1938 inner_iph = skb_header_pointer(skb,
1939 skb_transport_offset(skb) + sizeof(*icmph),
1940 sizeof(_inner_iph), &_inner_iph);
1941 if (!inner_iph)
1942 goto out;
1943
1944 key_iph = inner_iph;
Roopa Prabhu5e5d6fe2018-02-28 22:43:22 -05001945 _flkeys = NULL;
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001946out:
Roopa Prabhu5e5d6fe2018-02-28 22:43:22 -05001947 if (_flkeys) {
1948 keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
1949 keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
1950 keys->tags.flow_label = _flkeys->tags.flow_label;
1951 keys->basic.ip_proto = _flkeys->basic.ip_proto;
1952 } else {
1953 keys->addrs.v6addrs.src = key_iph->saddr;
1954 keys->addrs.v6addrs.dst = key_iph->daddr;
1955 keys->tags.flow_label = ip6_flowinfo(key_iph);
1956 keys->basic.ip_proto = key_iph->nexthdr;
1957 }
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001958}
1959
1960/* if skb is set it will be used and fl6 can be NULL */
David Ahernb4bac172018-03-02 08:32:18 -08001961u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
1962 const struct sk_buff *skb, struct flow_keys *flkeys)
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001963{
1964 struct flow_keys hash_keys;
David Ahern9a2a5372018-03-02 08:32:15 -08001965 u32 mhash;
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001966
David S. Millerbbfa0472018-03-12 11:09:33 -04001967 switch (ip6_multipath_hash_policy(net)) {
David Ahernb4bac172018-03-02 08:32:18 -08001968 case 0:
1969 memset(&hash_keys, 0, sizeof(hash_keys));
1970 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1971 if (skb) {
1972 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
1973 } else {
1974 hash_keys.addrs.v6addrs.src = fl6->saddr;
1975 hash_keys.addrs.v6addrs.dst = fl6->daddr;
1976 hash_keys.tags.flow_label = (__force u32)fl6->flowlabel;
1977 hash_keys.basic.ip_proto = fl6->flowi6_proto;
1978 }
1979 break;
1980 case 1:
1981 if (skb) {
1982 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
1983 struct flow_keys keys;
1984
1985 /* short-circuit if we already have L4 hash present */
1986 if (skb->l4_hash)
1987 return skb_get_hash_raw(skb) >> 1;
1988
1989 memset(&hash_keys, 0, sizeof(hash_keys));
1990
1991 if (!flkeys) {
1992 skb_flow_dissect_flow_keys(skb, &keys, flag);
1993 flkeys = &keys;
1994 }
1995 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1996 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
1997 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
1998 hash_keys.ports.src = flkeys->ports.src;
1999 hash_keys.ports.dst = flkeys->ports.dst;
2000 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2001 } else {
2002 memset(&hash_keys, 0, sizeof(hash_keys));
2003 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2004 hash_keys.addrs.v6addrs.src = fl6->saddr;
2005 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2006 hash_keys.ports.src = fl6->fl6_sport;
2007 hash_keys.ports.dst = fl6->fl6_dport;
2008 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2009 }
2010 break;
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02002011 }
David Ahern9a2a5372018-03-02 08:32:15 -08002012 mhash = flow_hash_from_keys(&hash_keys);
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02002013
David Ahern9a2a5372018-03-02 08:32:15 -08002014 return mhash >> 1;
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02002015}
2016
Thomas Grafc71099a2006-08-04 23:20:06 -07002017void ip6_route_input(struct sk_buff *skb)
2018{
Eric Dumazetb71d1d42011-04-22 04:53:02 +00002019 const struct ipv6hdr *iph = ipv6_hdr(skb);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002020 struct net *net = dev_net(skb->dev);
Thomas Grafadaa70b2006-10-13 15:01:03 -07002021 int flags = RT6_LOOKUP_F_HAS_SADDR;
Jiri Benc904af042015-08-20 13:56:31 +02002022 struct ip_tunnel_info *tun_info;
David S. Miller4c9483b2011-03-12 16:22:43 -05002023 struct flowi6 fl6 = {
David Aherne0d56fd2016-09-10 12:09:57 -07002024 .flowi6_iif = skb->dev->ifindex,
David S. Miller4c9483b2011-03-12 16:22:43 -05002025 .daddr = iph->daddr,
2026 .saddr = iph->saddr,
YOSHIFUJI Hideaki / 吉藤英明6502ca52013-01-13 05:01:51 +00002027 .flowlabel = ip6_flowinfo(iph),
David S. Miller4c9483b2011-03-12 16:22:43 -05002028 .flowi6_mark = skb->mark,
2029 .flowi6_proto = iph->nexthdr,
Thomas Grafc71099a2006-08-04 23:20:06 -07002030 };
Roopa Prabhu5e5d6fe2018-02-28 22:43:22 -05002031 struct flow_keys *flkeys = NULL, _flkeys;
Thomas Grafadaa70b2006-10-13 15:01:03 -07002032
Jiri Benc904af042015-08-20 13:56:31 +02002033 tun_info = skb_tunnel_info(skb);
Jiri Benc46fa0622015-08-28 20:48:19 +02002034 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
Jiri Benc904af042015-08-20 13:56:31 +02002035 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
Roopa Prabhu5e5d6fe2018-02-28 22:43:22 -05002036
2037 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2038 flkeys = &_flkeys;
2039
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02002040 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
David Ahernb4bac172018-03-02 08:32:18 -08002041 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
Jiri Benc06e9d042015-08-20 13:56:26 +02002042 skb_dst_drop(skb);
David Ahernb75cc8f2018-03-02 08:32:17 -08002043 skb_dst_set(skb,
2044 ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags));
Thomas Grafc71099a2006-08-04 23:20:06 -07002045}
2046
David Ahernb75cc8f2018-03-02 08:32:17 -08002047static struct rt6_info *ip6_pol_route_output(struct net *net,
2048 struct fib6_table *table,
2049 struct flowi6 *fl6,
2050 const struct sk_buff *skb,
2051 int flags)
Thomas Grafc71099a2006-08-04 23:20:06 -07002052{
David Ahernb75cc8f2018-03-02 08:32:17 -08002053 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
Thomas Grafc71099a2006-08-04 23:20:06 -07002054}
2055
Paolo Abeni6f21c962016-01-29 12:30:19 +01002056struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
2057 struct flowi6 *fl6, int flags)
Thomas Grafc71099a2006-08-04 23:20:06 -07002058{
David Ahernd46a9d62015-10-21 08:42:22 -07002059 bool any_src;
Thomas Grafc71099a2006-08-04 23:20:06 -07002060
David Ahern4c1feac2016-09-10 12:09:56 -07002061 if (rt6_need_strict(&fl6->daddr)) {
2062 struct dst_entry *dst;
2063
2064 dst = l3mdev_link_scope_lookup(net, fl6);
2065 if (dst)
2066 return dst;
2067 }
David Ahernca254492015-10-12 11:47:10 -07002068
Pavel Emelyanov1fb94892012-08-08 21:53:36 +00002069 fl6->flowi6_iif = LOOPBACK_IFINDEX;
David McCullough4dc27d1c2012-06-25 15:42:26 +00002070
David Ahernd46a9d62015-10-21 08:42:22 -07002071 any_src = ipv6_addr_any(&fl6->saddr);
David Ahern741a11d2015-09-28 10:12:13 -07002072 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
David Ahernd46a9d62015-10-21 08:42:22 -07002073 (fl6->flowi6_oif && any_src))
YOSHIFUJI Hideaki77d16f42006-08-23 17:25:05 -07002074 flags |= RT6_LOOKUP_F_IFACE;
Thomas Grafc71099a2006-08-04 23:20:06 -07002075
David Ahernd46a9d62015-10-21 08:42:22 -07002076 if (!any_src)
Thomas Grafadaa70b2006-10-13 15:01:03 -07002077 flags |= RT6_LOOKUP_F_HAS_SADDR;
YOSHIFUJI Hideaki / 吉藤英明0c9a2ac2010-03-07 00:14:44 +00002078 else if (sk)
2079 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
Thomas Grafadaa70b2006-10-13 15:01:03 -07002080
David Ahernb75cc8f2018-03-02 08:32:17 -08002081 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082}
Paolo Abeni6f21c962016-01-29 12:30:19 +01002083EXPORT_SYMBOL_GPL(ip6_route_output_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084
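/* Build a blackhole copy of @dst_orig: packets routed through it are
 * silently discarded, while metrics and addressing are preserved.
 * Releases @dst_orig.
 */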
David S. Miller2774c132011-03-01 14:59:04 -08002085struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
David S. Miller14e50e52007-05-24 18:17:54 -07002086{
David S. Miller5c1e6aa2011-04-28 14:13:38 -07002087 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
Wei Wang1dbe32522017-06-17 10:42:26 -07002088 struct net_device *loopback_dev = net->loopback_dev;
David S. Miller14e50e52007-05-24 18:17:54 -07002089 struct dst_entry *new = NULL;
2090
Wei Wang1dbe32522017-06-17 10:42:26 -07002091 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
Steffen Klassert62cf27e2017-10-09 08:39:43 +02002092 DST_OBSOLETE_DEAD, 0);
David S. Miller14e50e52007-05-24 18:17:54 -07002093 if (rt) {
Martin KaFai Lau0a1f5962015-10-15 16:39:58 -07002094 rt6_info_init(rt);
Wei Wang81eb8442017-10-06 12:06:11 -07002095 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
Martin KaFai Lau0a1f5962015-10-15 16:39:58 -07002096
Changli Gaod8d1f302010-06-10 23:31:35 -07002097 new = &rt->dst;
David S. Miller14e50e52007-05-24 18:17:54 -07002098 new->__use = 1;
Herbert Xu352e5122007-11-13 21:34:06 -08002099 new->input = dst_discard;
Eric W. Biedermanede20592015-10-07 16:48:47 -05002100 new->output = dst_discard_out;
David S. Miller14e50e52007-05-24 18:17:54 -07002101
Martin KaFai Lau0a1f5962015-10-15 16:39:58 -07002102 dst_copy_metrics(new, &ort->dst);
David S. Miller14e50e52007-05-24 18:17:54 -07002103
Wei Wang1dbe32522017-06-17 10:42:26 -07002104 rt->rt6i_idev = in6_dev_get(loopback_dev);
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00002105 rt->rt6i_gateway = ort->rt6i_gateway;
Martin KaFai Lau0a1f5962015-10-15 16:39:58 -07002106 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
David S. Miller14e50e52007-05-24 18:17:54 -07002107
2108 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2109#ifdef CONFIG_IPV6_SUBTREES
2110 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2111#endif
David S. Miller14e50e52007-05-24 18:17:54 -07002112 }
2113
David S. Miller69ead7a2011-03-01 14:45:33 -08002114 dst_release(dst_orig);
2115 return new ? new : ERR_PTR(-ENOMEM);
David S. Miller14e50e52007-05-24 18:17:54 -07002116}
David S. Miller14e50e52007-05-24 18:17:54 -07002117
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118/*
2119 * Destination cache support functions
2120 */
2121
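/* A cached dst is valid only while its parent fib6_info still exists,
 * the sernum cookie matches and the parent has not expired.
 */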
David Ahern8d1c8022018-04-17 17:33:26 -07002122static bool fib6_check(struct fib6_info *f6i, u32 cookie)
David Ahern93531c62018-04-17 17:33:25 -07002123{
2124 u32 rt_cookie = 0;
2125
David Aherna269f1a2018-04-20 15:37:58 -07002126 if ((f6i && !fib6_get_cookie_safe(f6i, &rt_cookie)) ||
David Ahern93531c62018-04-17 17:33:25 -07002127 rt_cookie != cookie)
2128 return false;
2129
2130 if (fib6_check_expired(f6i))
2131 return false;
2132
2133 return true;
2134}
2135
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002136static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
2137{
Steffen Klassert36143642017-08-25 09:05:42 +02002138 u32 rt_cookie = 0;
Wei Wangc5cff852017-08-21 09:47:10 -07002139
David Aherna269f1a2018-04-20 15:37:58 -07002140 if ((rt->from && !fib6_get_cookie_safe(rt->from, &rt_cookie)) ||
David Ahern93531c62018-04-17 17:33:25 -07002141 rt_cookie != cookie)
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002142 return NULL;
2143
2144 if (rt6_check_expired(rt))
2145 return NULL;
2146
2147 return &rt->dst;
2148}
2149
2150static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
2151{
Martin KaFai Lau5973fb12015-11-11 11:51:07 -08002152 if (!__rt6_check_expired(rt) &&
2153 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
David Ahern93531c62018-04-17 17:33:25 -07002154 fib6_check(rt->from, cookie))
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002155 return &rt->dst;
2156 else
2157 return NULL;
2158}
2159
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
2161{
2162 struct rt6_info *rt;
2163
2164 rt = (struct rt6_info *) dst;
2165
Nicolas Dichtel6f3118b2012-09-10 22:09:46 +00002166 /* All IPV6 dsts are created with ->obsolete set to the value
2167 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
2168 * into this function always.
2169 */
Hannes Frederic Sowae3bc10b2013-10-24 07:48:24 +02002170
Martin KaFai Lau02bcf4e2015-11-11 11:51:08 -08002171 if (rt->rt6i_flags & RTF_PCPU ||
David Miller3a2232e2017-11-28 15:40:40 -05002172 (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002173 return rt6_dst_from_check(rt, cookie);
2174 else
2175 return rt6_check(rt, cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176}
2177
2178static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
2179{
2180 struct rt6_info *rt = (struct rt6_info *) dst;
2181
2182 if (rt) {
YOSHIFUJI Hideaki / 吉藤英明54c1a852010-03-28 07:15:45 +00002183 if (rt->rt6i_flags & RTF_CACHE) {
2184 if (rt6_check_expired(rt)) {
David Ahern93531c62018-04-17 17:33:25 -07002185 rt6_remove_exception_rt(rt);
YOSHIFUJI Hideaki / 吉藤英明54c1a852010-03-28 07:15:45 +00002186 dst = NULL;
2187 }
2188 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 dst_release(dst);
YOSHIFUJI Hideaki / 吉藤英明54c1a852010-03-28 07:15:45 +00002190 dst = NULL;
2191 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 }
YOSHIFUJI Hideaki / 吉藤英明54c1a852010-03-28 07:15:45 +00002193 return dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194}
2195
2196static void ip6_link_failure(struct sk_buff *skb)
2197{
2198 struct rt6_info *rt;
2199
Alexey Dobriyan3ffe5332010-02-18 08:25:24 +00002200 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201
Eric Dumazetadf30902009-06-02 05:19:30 +00002202 rt = (struct rt6_info *) skb_dst(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 if (rt) {
Hannes Frederic Sowa1eb4f752013-07-10 23:00:57 +02002204 if (rt->rt6i_flags & RTF_CACHE) {
Wei Wangad65a2f2017-06-17 10:42:35 -07002205 if (dst_hold_safe(&rt->dst))
David Ahern93531c62018-04-17 17:33:25 -07002206 rt6_remove_exception_rt(rt);
2207 } else if (rt->from) {
Wei Wangc5cff852017-08-21 09:47:10 -07002208 struct fib6_node *fn;
2209
2210 rcu_read_lock();
David Ahern93c2fb22018-04-18 15:38:59 -07002211 fn = rcu_dereference(rt->from->fib6_node);
Wei Wangc5cff852017-08-21 09:47:10 -07002212 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2213 fn->fn_sernum = -1;
2214 rcu_read_unlock();
Hannes Frederic Sowa1eb4f752013-07-10 23:00:57 +02002215 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 }
2217}
2218
David Ahern6a3e0302018-04-20 15:37:57 -07002219static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2220{
2221 if (!(rt0->rt6i_flags & RTF_EXPIRES) && rt0->from)
2222 rt0->dst.expires = rt0->from->expires;
2223
2224 dst_set_expires(&rt0->dst, timeout);
2225 rt0->rt6i_flags |= RTF_EXPIRES;
2226}
2227
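/* Record a new path MTU on a cached route and arm its expiry timer
 * (ip6_rt_mtu_expires).
 */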
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002228static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2229{
2230 struct net *net = dev_net(rt->dst.dev);
2231
David Ahernd4ead6b2018-04-17 17:33:16 -07002232 dst_metric_set(&rt->dst, RTAX_MTU, mtu);
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002233 rt->rt6i_flags |= RTF_MODIFIED;
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002234 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2235}
2236
Martin KaFai Lau0d3f6d22015-11-11 11:51:06 -08002237static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2238{
2239 return !(rt->rt6i_flags & RTF_CACHE) &&
David Ahern77634cc2018-04-17 17:33:27 -07002240 (rt->rt6i_flags & RTF_PCPU || rt->from);
Martin KaFai Lau0d3f6d22015-11-11 11:51:06 -08002241}
2242
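/* PMTU update core: update a cached/per-cpu route in place when
 * allowed, otherwise create an RTF_CACHE exception carrying the
 * reduced MTU and insert it under the parent route.
 */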
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002243static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2244 const struct ipv6hdr *iph, u32 mtu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245{
Julian Anastasov0dec8792017-02-06 23:14:16 +02002246 const struct in6_addr *daddr, *saddr;
Ian Morris67ba4152014-08-24 21:53:10 +01002247 struct rt6_info *rt6 = (struct rt6_info *)dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002249 if (rt6->rt6i_flags & RTF_LOCAL)
2250 return;
2251
Xin Long19bda362016-10-28 18:18:01 +08002252 if (dst_metric_locked(dst, RTAX_MTU))
2253 return;
2254
Julian Anastasov0dec8792017-02-06 23:14:16 +02002255 if (iph) {
2256 daddr = &iph->daddr;
2257 saddr = &iph->saddr;
2258 } else if (sk) {
2259 daddr = &sk->sk_v6_daddr;
2260 saddr = &inet6_sk(sk)->saddr;
2261 } else {
2262 daddr = NULL;
2263 saddr = NULL;
2264 }
2265 dst_confirm_neigh(dst, daddr);
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002266 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
2267 if (mtu >= dst_mtu(dst))
2268 return;
David S. Miller81aded22012-06-15 14:54:11 -07002269
Martin KaFai Lau0d3f6d22015-11-11 11:51:06 -08002270 if (!rt6_cache_allowed_for_pmtu(rt6)) {
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002271 rt6_do_update_pmtu(rt6, mtu);
Wei Wang2b760fc2017-10-06 12:06:03 -07002272 /* update rt6_ex->stamp for cache */
2273 if (rt6->rt6i_flags & RTF_CACHE)
2274 rt6_update_exception_stamp_rt(rt6);
Julian Anastasov0dec8792017-02-06 23:14:16 +02002275 } else if (daddr) {
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002276 struct rt6_info *nrt6;
Hagen Paul Pfeifer9d289712015-01-15 22:34:25 +01002277
David Ahern4d85cd02018-04-20 15:37:59 -07002278 rcu_read_lock();
David Ahernd4ead6b2018-04-17 17:33:16 -07002279 nrt6 = ip6_rt_cache_alloc(rt6->from, daddr, saddr);
David Ahern4d85cd02018-04-20 15:37:59 -07002280 rcu_read_unlock();
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002281 if (nrt6) {
2282 rt6_do_update_pmtu(nrt6, mtu);
David Ahernd4ead6b2018-04-17 17:33:16 -07002283 if (rt6_insert_exception(nrt6, rt6->from))
Wei Wang2b760fc2017-10-06 12:06:03 -07002284 dst_release_immediate(&nrt6->dst);
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002285 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 }
2287}
2288
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002289static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2290 struct sk_buff *skb, u32 mtu)
2291{
2292 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
2293}
2294
David S. Miller42ae66c2012-06-15 20:01:57 -07002295void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09002296 int oif, u32 mark, kuid_t uid)
David S. Miller81aded22012-06-15 14:54:11 -07002297{
2298 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2299 struct dst_entry *dst;
2300 struct flowi6 fl6;
2301
2302 memset(&fl6, 0, sizeof(fl6));
2303 fl6.flowi6_oif = oif;
Lorenzo Colitti1b3c61d2014-05-13 10:17:34 -07002304 fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
David S. Miller81aded22012-06-15 14:54:11 -07002305 fl6.daddr = iph->daddr;
2306 fl6.saddr = iph->saddr;
YOSHIFUJI Hideaki / 吉藤英明6502ca52013-01-13 05:01:51 +00002307 fl6.flowlabel = ip6_flowinfo(iph);
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09002308 fl6.flowi6_uid = uid;
David S. Miller81aded22012-06-15 14:54:11 -07002309
2310 dst = ip6_route_output(net, NULL, &fl6);
2311 if (!dst->error)
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002312 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
David S. Miller81aded22012-06-15 14:54:11 -07002313 dst_release(dst);
2314}
2315EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2316
2317void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2318{
Martin KaFai Lau33c162a2016-04-11 15:29:36 -07002319 struct dst_entry *dst;
2320
David S. Miller81aded22012-06-15 14:54:11 -07002321 ip6_update_pmtu(skb, sock_net(sk), mtu,
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09002322 sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
Martin KaFai Lau33c162a2016-04-11 15:29:36 -07002323
2324 dst = __sk_dst_get(sk);
2325 if (!dst || !dst->obsolete ||
2326 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2327 return;
2328
2329 bh_lock_sock(sk);
2330 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2331 ip6_datagram_dst_update(sk, false);
2332 bh_unlock_sock(sk);
David S. Miller81aded22012-06-15 14:54:11 -07002333}
2334EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2335
Alexey Kodanev7d6850f2018-04-03 15:00:07 +03002336void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
2337 const struct flowi6 *fl6)
2338{
2339#ifdef CONFIG_IPV6_SUBTREES
2340 struct ipv6_pinfo *np = inet6_sk(sk);
2341#endif
2342
2343 ip6_dst_store(sk, dst,
2344 ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
2345 &sk->sk_v6_daddr : NULL,
2346#ifdef CONFIG_IPV6_SUBTREES
2347 ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
2348 &np->saddr :
2349#endif
2350 NULL);
2351}
2352
Duan Jiongb55b76b2013-09-04 19:44:21 +08002353/* Handle redirects */
2354struct ip6rd_flowi {
2355 struct flowi6 fl6;
2356 struct in6_addr gateway;
2357};
2358
2359static struct rt6_info *__ip6_route_redirect(struct net *net,
2360 struct fib6_table *table,
2361 struct flowi6 *fl6,
David Ahernb75cc8f2018-03-02 08:32:17 -08002362 const struct sk_buff *skb,
Duan Jiongb55b76b2013-09-04 19:44:21 +08002363 int flags)
2364{
2365 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
David Ahern23fb93a2018-04-17 17:33:23 -07002366 struct rt6_info *ret = NULL, *rt_cache;
David Ahern8d1c8022018-04-17 17:33:26 -07002367 struct fib6_info *rt;
Duan Jiongb55b76b2013-09-04 19:44:21 +08002368 struct fib6_node *fn;
2369
2370 /* Get the "current" route for this destination and
Alexander Alemayhu67c408c2017-01-07 23:53:00 +01002371 * check if the redirect has come from the appropriate router.
Duan Jiongb55b76b2013-09-04 19:44:21 +08002372 *
2373 * RFC 4861 specifies that redirects should only be
2374 * accepted if they come from the nexthop to the target.
2375 * Due to the way the routes are chosen, this notion
2376 * is a bit fuzzy and one might need to check all possible
2377 * routes.
2378 */
2379
Wei Wang66f5d6c2017-10-06 12:06:10 -07002380 rcu_read_lock();
Duan Jiongb55b76b2013-09-04 19:44:21 +08002381 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2382restart:
Wei Wang66f5d6c2017-10-06 12:06:10 -07002383 for_each_fib6_node_rt_rcu(fn) {
David Ahern5e670d82018-04-17 17:33:14 -07002384 if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
Ido Schimmel8067bb82018-01-07 12:45:09 +02002385 continue;
David Ahern14895682018-04-17 17:33:17 -07002386 if (fib6_check_expired(rt))
Duan Jiongb55b76b2013-09-04 19:44:21 +08002387 continue;
David Ahern93c2fb22018-04-18 15:38:59 -07002388 if (rt->fib6_flags & RTF_REJECT)
Duan Jiongb55b76b2013-09-04 19:44:21 +08002389 break;
David Ahern93c2fb22018-04-18 15:38:59 -07002390 if (!(rt->fib6_flags & RTF_GATEWAY))
Duan Jiongb55b76b2013-09-04 19:44:21 +08002391 continue;
David Ahern5e670d82018-04-17 17:33:14 -07002392 if (fl6->flowi6_oif != rt->fib6_nh.nh_dev->ifindex)
Duan Jiongb55b76b2013-09-04 19:44:21 +08002393 continue;
Wei Wang2b760fc2017-10-06 12:06:03 -07002394 /* rt_cache's gateway might be different from its 'parent'
2395 * in the case of an ip redirect.
2396 * So we keep searching in the exception table if the gateway
2397 * is different.
2398 */
David Ahern5e670d82018-04-17 17:33:14 -07002399 if (!ipv6_addr_equal(&rdfl->gateway, &rt->fib6_nh.nh_gw)) {
Wei Wang2b760fc2017-10-06 12:06:03 -07002400 rt_cache = rt6_find_cached_rt(rt,
2401 &fl6->daddr,
2402 &fl6->saddr);
2403 if (rt_cache &&
2404 ipv6_addr_equal(&rdfl->gateway,
2405 &rt_cache->rt6i_gateway)) {
David Ahern23fb93a2018-04-17 17:33:23 -07002406 ret = rt_cache;
Wei Wang2b760fc2017-10-06 12:06:03 -07002407 break;
2408 }
Duan Jiongb55b76b2013-09-04 19:44:21 +08002409 continue;
Wei Wang2b760fc2017-10-06 12:06:03 -07002410 }
Duan Jiongb55b76b2013-09-04 19:44:21 +08002411 break;
2412 }
2413
2414 if (!rt)
David Ahern421842e2018-04-17 17:33:18 -07002415 rt = net->ipv6.fib6_null_entry;
David Ahern93c2fb22018-04-18 15:38:59 -07002416 else if (rt->fib6_flags & RTF_REJECT) {
David Ahern23fb93a2018-04-17 17:33:23 -07002417 ret = net->ipv6.ip6_null_entry;
Martin KaFai Laub0a1ba52015-01-20 19:16:02 -08002418 goto out;
2419 }
2420
David Ahern421842e2018-04-17 17:33:18 -07002421 if (rt == net->ipv6.fib6_null_entry) {
Martin KaFai Laua3c00e42014-10-20 13:42:43 -07002422 fn = fib6_backtrack(fn, &fl6->saddr);
2423 if (fn)
2424 goto restart;
Duan Jiongb55b76b2013-09-04 19:44:21 +08002425 }
Martin KaFai Laua3c00e42014-10-20 13:42:43 -07002426
Martin KaFai Laub0a1ba52015-01-20 19:16:02 -08002427out:
David Ahern23fb93a2018-04-17 17:33:23 -07002428 if (ret)
2429 dst_hold(&ret->dst);
2430 else
2431 ret = ip6_create_rt_rcu(rt);
Duan Jiongb55b76b2013-09-04 19:44:21 +08002432
Wei Wang66f5d6c2017-10-06 12:06:10 -07002433 rcu_read_unlock();
Duan Jiongb55b76b2013-09-04 19:44:21 +08002434
David Ahern23fb93a2018-04-17 17:33:23 -07002435 trace_fib6_table_lookup(net, ret, table, fl6);
2436 return ret;
Duan Jiongb55b76b2013-09-04 19:44:21 +08002437};
2438
2439static struct dst_entry *ip6_route_redirect(struct net *net,
David Ahernb75cc8f2018-03-02 08:32:17 -08002440 const struct flowi6 *fl6,
2441 const struct sk_buff *skb,
2442 const struct in6_addr *gateway)
Duan Jiongb55b76b2013-09-04 19:44:21 +08002443{
2444 int flags = RT6_LOOKUP_F_HAS_SADDR;
2445 struct ip6rd_flowi rdfl;
2446
2447 rdfl.fl6 = *fl6;
2448 rdfl.gateway = *gateway;
2449
David Ahernb75cc8f2018-03-02 08:32:17 -08002450 return fib6_rule_lookup(net, &rdfl.fl6, skb,
Duan Jiongb55b76b2013-09-04 19:44:21 +08002451 flags, __ip6_route_redirect);
2452}
2453
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09002454void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
2455 kuid_t uid)
David S. Miller3a5ad2e2012-07-12 00:08:07 -07002456{
2457 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2458 struct dst_entry *dst;
2459 struct flowi6 fl6;
2460
2461 memset(&fl6, 0, sizeof(fl6));
Julian Anastasove374c612014-04-28 10:51:56 +03002462 fl6.flowi6_iif = LOOPBACK_IFINDEX;
David S. Miller3a5ad2e2012-07-12 00:08:07 -07002463 fl6.flowi6_oif = oif;
2464 fl6.flowi6_mark = mark;
David S. Miller3a5ad2e2012-07-12 00:08:07 -07002465 fl6.daddr = iph->daddr;
2466 fl6.saddr = iph->saddr;
YOSHIFUJI Hideaki / 吉藤英明6502ca52013-01-13 05:01:51 +00002467 fl6.flowlabel = ip6_flowinfo(iph);
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09002468 fl6.flowi6_uid = uid;
David S. Miller3a5ad2e2012-07-12 00:08:07 -07002469
David Ahernb75cc8f2018-03-02 08:32:17 -08002470 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
Duan Jiongb55b76b2013-09-04 19:44:21 +08002471 rt6_do_redirect(dst, NULL, skb);
David S. Miller3a5ad2e2012-07-12 00:08:07 -07002472 dst_release(dst);
2473}
2474EXPORT_SYMBOL_GPL(ip6_redirect);
2475
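/* Like ip6_redirect(), but for callers that do not have the offending
 * packet's header available: the flow is rebuilt from the Redirect
 * message itself and the outer IPv6 header of the received redirect.
 */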
Duan Jiongc92a59e2013-08-22 12:07:35 +08002476void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
2477 u32 mark)
2478{
2479 const struct ipv6hdr *iph = ipv6_hdr(skb);
2480 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
2481 struct dst_entry *dst;
2482 struct flowi6 fl6;
2483
2484 memset(&fl6, 0, sizeof(fl6));
Julian Anastasove374c612014-04-28 10:51:56 +03002485 fl6.flowi6_iif = LOOPBACK_IFINDEX;
Duan Jiongc92a59e2013-08-22 12:07:35 +08002486 fl6.flowi6_oif = oif;
2487 fl6.flowi6_mark = mark;
Duan Jiongc92a59e2013-08-22 12:07:35 +08002488 fl6.daddr = msg->dest;
2489 fl6.saddr = iph->daddr;
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09002490 fl6.flowi6_uid = sock_net_uid(net, NULL);
Duan Jiongc92a59e2013-08-22 12:07:35 +08002491
David Ahernb75cc8f2018-03-02 08:32:17 -08002492 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
Duan Jiongb55b76b2013-09-04 19:44:21 +08002493 rt6_do_redirect(dst, NULL, skb);
Duan Jiongc92a59e2013-08-22 12:07:35 +08002494 dst_release(dst);
2495}
2496
David S. Miller3a5ad2e2012-07-12 00:08:07 -07002497void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
2498{
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09002499 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
2500 sk->sk_uid);
David S. Miller3a5ad2e2012-07-12 00:08:07 -07002501}
2502EXPORT_SYMBOL_GPL(ip6_sk_redirect);
2503
David S. Miller0dbaee32010-12-13 12:52:14 -08002504static unsigned int ip6_default_advmss(const struct dst_entry *dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505{
David S. Miller0dbaee32010-12-13 12:52:14 -08002506 struct net_device *dev = dst->dev;
2507 unsigned int mtu = dst_mtu(dst);
2508 struct net *net = dev_net(dev);
2509
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
2511
Daniel Lezcano55786892008-03-04 13:47:47 -08002512 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
2513 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514
2515 /*
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09002516 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
2517 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
2518 * IPV6_MAXPLEN is also valid and means: "any MSS,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519 * rely only on pmtu discovery"
2520 */
2521 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
2522 mtu = IPV6_MAXPLEN;
2523 return mtu;
2524}
2525
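/* MTU for a dst: prefer an explicit RTAX_MTU metric; otherwise fall back
 * to the device's IPv6 MTU, then clamp to IP6_MAX_MTU and subtract any
 * lwtunnel encapsulation headroom.
 */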
Steffen Klassertebb762f2011-11-23 02:12:51 +00002526static unsigned int ip6_mtu(const struct dst_entry *dst)
David S. Millerd33e4552010-12-14 13:01:14 -08002527{
David S. Millerd33e4552010-12-14 13:01:14 -08002528 struct inet6_dev *idev;
David Ahernd4ead6b2018-04-17 17:33:16 -07002529 unsigned int mtu;
Steffen Klassert618f9bc2011-11-23 02:13:31 +00002530
Martin KaFai Lau4b32b5a2015-04-28 13:03:06 -07002531 mtu = dst_metric_raw(dst, RTAX_MTU);
2532 if (mtu)
2533 goto out;
2534
Steffen Klassert618f9bc2011-11-23 02:13:31 +00002535 mtu = IPV6_MIN_MTU;
David S. Millerd33e4552010-12-14 13:01:14 -08002536
2537 rcu_read_lock();
2538 idev = __in6_dev_get(dst->dev);
2539 if (idev)
2540 mtu = idev->cnf.mtu6;
2541 rcu_read_unlock();
2542
Eric Dumazet30f78d82014-04-10 21:23:36 -07002543out:
Roopa Prabhu14972cb2016-08-24 20:10:43 -07002544 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
2545
2546 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
David S. Millerd33e4552010-12-14 13:01:14 -08002547}
2548
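/* Build a host dst for ndisc/ICMPv6 output. It is never inserted into a
 * FIB table; it is only tracked on the uncached list so that device
 * unregister can release it properly.
 */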
YOSHIFUJI Hideaki3b009442007-12-06 16:11:48 -08002549struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
David S. Miller87a11572011-12-06 17:04:13 -05002550 struct flowi6 *fl6)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551{
David S. Miller87a11572011-12-06 17:04:13 -05002552 struct dst_entry *dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 struct rt6_info *rt;
2554 struct inet6_dev *idev = in6_dev_get(dev);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002555 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556
David S. Miller38308472011-12-03 18:02:47 -05002557 if (unlikely(!idev))
Eric Dumazet122bdf62012-03-14 21:13:11 +00002558 return ERR_PTR(-ENODEV);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559
Martin KaFai Lauad706862015-08-14 11:05:52 -07002560 rt = ip6_dst_alloc(net, dev, 0);
David S. Miller38308472011-12-03 18:02:47 -05002561 if (unlikely(!rt)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562 in6_dev_put(idev);
David S. Miller87a11572011-12-06 17:04:13 -05002563 dst = ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 goto out;
2565 }
2566
Yan, Zheng8e2ec632011-09-05 21:34:30 +00002567 rt->dst.flags |= DST_HOST;
Brendan McGrath588753f2017-12-13 22:14:57 +11002568 rt->dst.input = ip6_input;
Yan, Zheng8e2ec632011-09-05 21:34:30 +00002569 rt->dst.output = ip6_output;
Julian Anastasov550bab42013-10-20 15:43:04 +03002570 rt->rt6i_gateway = fl6->daddr;
David S. Miller87a11572011-12-06 17:04:13 -05002571 rt->rt6i_dst.addr = fl6->daddr;
Yan, Zheng8e2ec632011-09-05 21:34:30 +00002572 rt->rt6i_dst.plen = 128;
2573 rt->rt6i_idev = idev;
Li RongQing14edd872012-10-24 14:01:18 +08002574 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575
Ido Schimmel4c981e22018-01-07 12:45:04 +02002576 /* Add this dst into uncached_list so that rt6_disable_ip() can
Wei Wang587fea72017-06-17 10:42:36 -07002577	 * properly release the net_device
2578 */
2579 rt6_uncached_list_add(rt);
Wei Wang81eb8442017-10-06 12:06:11 -07002580 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581
David S. Miller87a11572011-12-06 17:04:13 -05002582 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
2583
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584out:
David S. Miller87a11572011-12-06 17:04:13 -05002585 return dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586}
2587
Daniel Lezcano569d3642008-01-18 03:56:57 -08002588static int ip6_dst_gc(struct dst_ops *ops)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589{
Alexey Dobriyan86393e52009-08-29 01:34:49 +00002590 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
Daniel Lezcano7019b782008-03-04 13:50:14 -08002591 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
2592 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
2593 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
2594 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
2595 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
Eric Dumazetfc66f952010-10-08 06:37:34 +00002596 int entries;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597
Eric Dumazetfc66f952010-10-08 06:37:34 +00002598 entries = dst_entries_get_fast(ops);
Michal Kubeček49a18d82013-08-01 10:04:24 +02002599 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
Eric Dumazetfc66f952010-10-08 06:37:34 +00002600 entries <= rt_max_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 goto out;
2602
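	/* Either the table is over rt_max_size or the minimum interval has
	 * passed: run the fib6 garbage collector with an adaptive expiry
	 * (ip6_rt_gc_expire) that is decayed below on every call and reset
	 * once the table shrinks under gc_thresh.
	 */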
Benjamin Thery6891a342008-03-04 13:49:47 -08002603 net->ipv6.ip6_rt_gc_expire++;
Li RongQing14956642014-05-19 17:30:28 +08002604 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
Eric Dumazetfc66f952010-10-08 06:37:34 +00002605 entries = dst_entries_get_slow(ops);
2606 if (entries < ops->gc_thresh)
Daniel Lezcano7019b782008-03-04 13:50:14 -08002607 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608out:
Daniel Lezcano7019b782008-03-04 13:50:14 -08002609 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
Eric Dumazetfc66f952010-10-08 06:37:34 +00002610 return entries > rt_max_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611}
2612
David Ahern8d1c8022018-04-17 17:33:26 -07002613static int ip6_convert_metrics(struct net *net, struct fib6_info *rt,
David Ahernd4ead6b2018-04-17 17:33:16 -07002614 struct fib6_config *cfg)
Florian Westphale715b6d2015-01-05 23:57:44 +01002615{
Eric Dumazet263243d2018-04-19 09:14:53 -07002616 struct dst_metrics *p;
Florian Westphale715b6d2015-01-05 23:57:44 +01002617
Eric Dumazet263243d2018-04-19 09:14:53 -07002618 if (!cfg->fc_mx)
2619 return 0;
Florian Westphale715b6d2015-01-05 23:57:44 +01002620
Eric Dumazet263243d2018-04-19 09:14:53 -07002621 p = kzalloc(sizeof(*rt->fib6_metrics), GFP_KERNEL);
2622 if (unlikely(!p))
2623 return -ENOMEM;
Florian Westphale715b6d2015-01-05 23:57:44 +01002624
Eric Dumazet263243d2018-04-19 09:14:53 -07002625 refcount_set(&p->refcnt, 1);
2626 rt->fib6_metrics = p;
Florian Westphale715b6d2015-01-05 23:57:44 +01002627
Eric Dumazet263243d2018-04-19 09:14:53 -07002628 return ip_metrics_convert(net, cfg->fc_mx, cfg->fc_mx_len, p->metrics);
Florian Westphale715b6d2015-01-05 23:57:44 +01002629}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630
David Ahern8c145862016-04-24 21:26:04 -07002631static struct rt6_info *ip6_nh_lookup_table(struct net *net,
2632 struct fib6_config *cfg,
David Ahernf4797b32018-01-25 16:55:08 -08002633 const struct in6_addr *gw_addr,
2634 u32 tbid, int flags)
David Ahern8c145862016-04-24 21:26:04 -07002635{
2636 struct flowi6 fl6 = {
2637 .flowi6_oif = cfg->fc_ifindex,
2638 .daddr = *gw_addr,
2639 .saddr = cfg->fc_prefsrc,
2640 };
2641 struct fib6_table *table;
2642 struct rt6_info *rt;
David Ahern8c145862016-04-24 21:26:04 -07002643
David Ahernf4797b32018-01-25 16:55:08 -08002644 table = fib6_get_table(net, tbid);
David Ahern8c145862016-04-24 21:26:04 -07002645 if (!table)
2646 return NULL;
2647
2648 if (!ipv6_addr_any(&cfg->fc_prefsrc))
2649 flags |= RT6_LOOKUP_F_HAS_SADDR;
2650
David Ahernf4797b32018-01-25 16:55:08 -08002651 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
David Ahernb75cc8f2018-03-02 08:32:17 -08002652 rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);
David Ahern8c145862016-04-24 21:26:04 -07002653
2654 /* if table lookup failed, fall back to full lookup */
2655 if (rt == net->ipv6.ip6_null_entry) {
2656 ip6_rt_put(rt);
2657 rt = NULL;
2658 }
2659
2660 return rt;
2661}
2662
David Ahernfc1e64e2018-01-25 16:55:09 -08002663static int ip6_route_check_nh_onlink(struct net *net,
2664 struct fib6_config *cfg,
David Ahern9fbb7042018-03-13 08:29:36 -07002665 const struct net_device *dev,
David Ahernfc1e64e2018-01-25 16:55:09 -08002666 struct netlink_ext_ack *extack)
2667{
David Ahern44750f82018-02-06 13:17:06 -08002668 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
David Ahernfc1e64e2018-01-25 16:55:09 -08002669 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2670 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
2671 struct rt6_info *grt;
2672 int err;
2673
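	/* An onlink nexthop must not resolve, in the nexthop's table, to a
	 * local/anycast/reject route or to a route out of a different device.
	 */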
2674 err = 0;
2675 grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
2676 if (grt) {
David Ahern58e354c2018-02-06 12:14:12 -08002677 if (!grt->dst.error &&
2678 (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
David Ahern44750f82018-02-06 13:17:06 -08002679 NL_SET_ERR_MSG(extack,
2680 "Nexthop has invalid gateway or device mismatch");
David Ahernfc1e64e2018-01-25 16:55:09 -08002681 err = -EINVAL;
2682 }
2683
2684 ip6_rt_put(grt);
2685 }
2686
2687 return err;
2688}
2689
David Ahern1edce992018-01-25 16:55:07 -08002690static int ip6_route_check_nh(struct net *net,
2691 struct fib6_config *cfg,
2692 struct net_device **_dev,
2693 struct inet6_dev **idev)
2694{
2695 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2696 struct net_device *dev = _dev ? *_dev : NULL;
2697 struct rt6_info *grt = NULL;
2698 int err = -EHOSTUNREACH;
2699
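	/* Resolve the gateway: try the table given in the request first, then
	 * fall back to a full lookup. The nexthop is accepted only when the
	 * resolved route is not itself via a gateway and, if a device was
	 * given, uses that device.
	 */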
2700 if (cfg->fc_table) {
David Ahernf4797b32018-01-25 16:55:08 -08002701 int flags = RT6_LOOKUP_F_IFACE;
2702
2703 grt = ip6_nh_lookup_table(net, cfg, gw_addr,
2704 cfg->fc_table, flags);
David Ahern1edce992018-01-25 16:55:07 -08002705 if (grt) {
2706 if (grt->rt6i_flags & RTF_GATEWAY ||
2707 (dev && dev != grt->dst.dev)) {
2708 ip6_rt_put(grt);
2709 grt = NULL;
2710 }
2711 }
2712 }
2713
2714 if (!grt)
David Ahernb75cc8f2018-03-02 08:32:17 -08002715 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);
David Ahern1edce992018-01-25 16:55:07 -08002716
2717 if (!grt)
2718 goto out;
2719
2720 if (dev) {
2721 if (dev != grt->dst.dev) {
2722 ip6_rt_put(grt);
2723 goto out;
2724 }
2725 } else {
2726 *_dev = dev = grt->dst.dev;
2727 *idev = grt->rt6i_idev;
2728 dev_hold(dev);
2729 in6_dev_hold(grt->rt6i_idev);
2730 }
2731
2732 if (!(grt->rt6i_flags & RTF_GATEWAY))
2733 err = 0;
2734
2735 ip6_rt_put(grt);
2736
2737out:
2738 return err;
2739}
2740
David Ahern9fbb7042018-03-13 08:29:36 -07002741static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
2742 struct net_device **_dev, struct inet6_dev **idev,
2743 struct netlink_ext_ack *extack)
2744{
2745 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2746 int gwa_type = ipv6_addr_type(gw_addr);
David Ahern232378e2018-03-13 08:29:37 -07002747 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
David Ahern9fbb7042018-03-13 08:29:36 -07002748 const struct net_device *dev = *_dev;
David Ahern232378e2018-03-13 08:29:37 -07002749 bool need_addr_check = !dev;
David Ahern9fbb7042018-03-13 08:29:36 -07002750 int err = -EINVAL;
2751
 2752	/* if gw_addr is local we will fail to detect this in case the
 2753	 * address is still TENTATIVE (DAD in progress). rt6_lookup()
 2754	 * will return the already-added prefix route via the interface
 2755	 * that the prefix route was assigned to, which might be non-loopback.
2756 */
David Ahern232378e2018-03-13 08:29:37 -07002757 if (dev &&
2758 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
2759 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
David Ahern9fbb7042018-03-13 08:29:36 -07002760 goto out;
2761 }
2762
2763 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
 2764		/* IPv6 strictly prohibits using non-link-local
 2765		 * addresses as the nexthop address.
 2766		 * Otherwise, a router would not be able to send redirects.
2767 * It is very good, but in some (rare!) circumstances
2768 * (SIT, PtP, NBMA NOARP links) it is handy to allow
2769 * some exceptions. --ANK
2770 * We allow IPv4-mapped nexthops to support RFC4798-type
2771 * addressing
2772 */
2773 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
2774 NL_SET_ERR_MSG(extack, "Invalid gateway address");
2775 goto out;
2776 }
2777
2778 if (cfg->fc_flags & RTNH_F_ONLINK)
2779 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
2780 else
2781 err = ip6_route_check_nh(net, cfg, _dev, idev);
2782
2783 if (err)
2784 goto out;
2785 }
2786
2787 /* reload in case device was changed */
2788 dev = *_dev;
2789
2790 err = -EINVAL;
2791 if (!dev) {
2792 NL_SET_ERR_MSG(extack, "Egress device not specified");
2793 goto out;
2794 } else if (dev->flags & IFF_LOOPBACK) {
2795 NL_SET_ERR_MSG(extack,
2796 "Egress device can not be loopback device for this route");
2797 goto out;
2798 }
David Ahern232378e2018-03-13 08:29:37 -07002799
2800 /* if we did not check gw_addr above, do so now that the
2801 * egress device has been resolved.
2802 */
2803 if (need_addr_check &&
2804 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
2805 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
2806 goto out;
2807 }
2808
David Ahern9fbb7042018-03-13 08:29:36 -07002809 err = 0;
2810out:
2811 return err;
2812}
2813
David Ahern8d1c8022018-04-17 17:33:26 -07002814static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
David Ahernacb54e32018-04-17 17:33:22 -07002815 gfp_t gfp_flags,
David Ahern333c4302017-05-21 10:12:04 -06002816 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817{
Daniel Lezcano55786892008-03-04 13:47:47 -08002818 struct net *net = cfg->fc_nlinfo.nl_net;
David Ahern8d1c8022018-04-17 17:33:26 -07002819 struct fib6_info *rt = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 struct net_device *dev = NULL;
2821 struct inet6_dev *idev = NULL;
Thomas Grafc71099a2006-08-04 23:20:06 -07002822 struct fib6_table *table;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 int addr_type;
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07002824 int err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825
David Ahern557c44b2017-04-19 14:19:43 -07002826 /* RTF_PCPU is an internal flag; can not be set by userspace */
David Ahernd5d531c2017-05-21 10:12:05 -06002827 if (cfg->fc_flags & RTF_PCPU) {
2828 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
David Ahern557c44b2017-04-19 14:19:43 -07002829 goto out;
David Ahernd5d531c2017-05-21 10:12:05 -06002830 }
David Ahern557c44b2017-04-19 14:19:43 -07002831
Wei Wang2ea23522017-10-27 17:30:12 -07002832 /* RTF_CACHE is an internal flag; can not be set by userspace */
2833 if (cfg->fc_flags & RTF_CACHE) {
2834 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
2835 goto out;
2836 }
2837
David Aherne8478e82018-04-17 17:33:13 -07002838 if (cfg->fc_type > RTN_MAX) {
2839 NL_SET_ERR_MSG(extack, "Invalid route type");
2840 goto out;
2841 }
2842
David Ahernd5d531c2017-05-21 10:12:05 -06002843 if (cfg->fc_dst_len > 128) {
2844 NL_SET_ERR_MSG(extack, "Invalid prefix length");
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07002845 goto out;
David Ahernd5d531c2017-05-21 10:12:05 -06002846 }
2847 if (cfg->fc_src_len > 128) {
2848 NL_SET_ERR_MSG(extack, "Invalid source address length");
2849 goto out;
2850 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851#ifndef CONFIG_IPV6_SUBTREES
David Ahernd5d531c2017-05-21 10:12:05 -06002852 if (cfg->fc_src_len) {
2853 NL_SET_ERR_MSG(extack,
2854 "Specifying source address requires IPV6_SUBTREES to be enabled");
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07002855 goto out;
David Ahernd5d531c2017-05-21 10:12:05 -06002856 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857#endif
Thomas Graf86872cb2006-08-22 00:01:08 -07002858 if (cfg->fc_ifindex) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859 err = -ENODEV;
Daniel Lezcano55786892008-03-04 13:47:47 -08002860 dev = dev_get_by_index(net, cfg->fc_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 if (!dev)
2862 goto out;
2863 idev = in6_dev_get(dev);
2864 if (!idev)
2865 goto out;
2866 }
2867
Thomas Graf86872cb2006-08-22 00:01:08 -07002868 if (cfg->fc_metric == 0)
2869 cfg->fc_metric = IP6_RT_PRIO_USER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870
David Ahernfc1e64e2018-01-25 16:55:09 -08002871 if (cfg->fc_flags & RTNH_F_ONLINK) {
2872 if (!dev) {
2873 NL_SET_ERR_MSG(extack,
2874 "Nexthop device required for onlink");
2875 err = -ENODEV;
2876 goto out;
2877 }
2878
2879 if (!(dev->flags & IFF_UP)) {
2880 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
2881 err = -ENETDOWN;
2882 goto out;
2883 }
2884 }
2885
Matti Vaittinend71314b2011-11-14 00:14:49 +00002886 err = -ENOBUFS;
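	/* Without NLM_F_CREATE only an existing table should be used;
	 * creating one anyway is tolerated but warned about.
	 */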
David S. Miller38308472011-12-03 18:02:47 -05002887 if (cfg->fc_nlinfo.nlh &&
2888 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
Matti Vaittinend71314b2011-11-14 00:14:49 +00002889 table = fib6_get_table(net, cfg->fc_table);
David S. Miller38308472011-12-03 18:02:47 -05002890 if (!table) {
Joe Perchesf3213832012-05-15 14:11:53 +00002891 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
Matti Vaittinend71314b2011-11-14 00:14:49 +00002892 table = fib6_new_table(net, cfg->fc_table);
2893 }
2894 } else {
2895 table = fib6_new_table(net, cfg->fc_table);
2896 }
David S. Miller38308472011-12-03 18:02:47 -05002897
2898 if (!table)
Thomas Grafc71099a2006-08-04 23:20:06 -07002899 goto out;
Thomas Grafc71099a2006-08-04 23:20:06 -07002900
David Ahern93531c62018-04-17 17:33:25 -07002901 err = -ENOMEM;
2902 rt = fib6_info_alloc(gfp_flags);
2903 if (!rt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 goto out;
David Ahern93531c62018-04-17 17:33:25 -07002905
2906 if (cfg->fc_flags & RTF_ADDRCONF)
2907 rt->dst_nocount = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908
David Ahernd4ead6b2018-04-17 17:33:16 -07002909 err = ip6_convert_metrics(net, rt, cfg);
2910 if (err < 0)
2911 goto out;
2912
Gao feng1716a962012-04-06 00:13:10 +00002913 if (cfg->fc_flags & RTF_EXPIRES)
David Ahern14895682018-04-17 17:33:17 -07002914 fib6_set_expires(rt, jiffies +
Gao feng1716a962012-04-06 00:13:10 +00002915 clock_t_to_jiffies(cfg->fc_expires));
2916 else
David Ahern14895682018-04-17 17:33:17 -07002917 fib6_clean_expires(rt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918
Thomas Graf86872cb2006-08-22 00:01:08 -07002919 if (cfg->fc_protocol == RTPROT_UNSPEC)
2920 cfg->fc_protocol = RTPROT_BOOT;
David Ahern93c2fb22018-04-18 15:38:59 -07002921 rt->fib6_protocol = cfg->fc_protocol;
Thomas Graf86872cb2006-08-22 00:01:08 -07002922
2923 addr_type = ipv6_addr_type(&cfg->fc_dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924
Roopa Prabhu19e42e42015-07-21 10:43:48 +02002925 if (cfg->fc_encap) {
2926 struct lwtunnel_state *lwtstate;
2927
David Ahern30357d72017-01-30 12:07:37 -08002928 err = lwtunnel_build_state(cfg->fc_encap_type,
Tom Herbert127eb7c2015-08-24 09:45:41 -07002929 cfg->fc_encap, AF_INET6, cfg,
David Ahern9ae28722017-05-27 16:19:28 -06002930 &lwtstate, extack);
Roopa Prabhu19e42e42015-07-21 10:43:48 +02002931 if (err)
2932 goto out;
David Ahern5e670d82018-04-17 17:33:14 -07002933 rt->fib6_nh.nh_lwtstate = lwtstate_get(lwtstate);
Roopa Prabhu19e42e42015-07-21 10:43:48 +02002934 }
2935
David Ahern93c2fb22018-04-18 15:38:59 -07002936 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
2937 rt->fib6_dst.plen = cfg->fc_dst_len;
2938 if (rt->fib6_dst.plen == 128)
David Ahern3b6761d2018-04-17 17:33:20 -07002939 rt->dst_host = true;
Michal Kubečeke5fd3872014-03-27 13:04:08 +01002940
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941#ifdef CONFIG_IPV6_SUBTREES
David Ahern93c2fb22018-04-18 15:38:59 -07002942 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
2943 rt->fib6_src.plen = cfg->fc_src_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944#endif
2945
David Ahern93c2fb22018-04-18 15:38:59 -07002946 rt->fib6_metric = cfg->fc_metric;
David Ahern5e670d82018-04-17 17:33:14 -07002947 rt->fib6_nh.nh_weight = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948
David Aherne8478e82018-04-17 17:33:13 -07002949 rt->fib6_type = cfg->fc_type;
2950
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951	/* We cannot add true routes via loopback here;
 2952	   they would result in kernel looping, so promote them to reject routes
2953 */
Thomas Graf86872cb2006-08-22 00:01:08 -07002954 if ((cfg->fc_flags & RTF_REJECT) ||
David S. Miller38308472011-12-03 18:02:47 -05002955 (dev && (dev->flags & IFF_LOOPBACK) &&
2956 !(addr_type & IPV6_ADDR_LOOPBACK) &&
2957 !(cfg->fc_flags & RTF_LOCAL))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958 /* hold loopback dev/idev if we haven't done so. */
Daniel Lezcano55786892008-03-04 13:47:47 -08002959 if (dev != net->loopback_dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960 if (dev) {
2961 dev_put(dev);
2962 in6_dev_put(idev);
2963 }
Daniel Lezcano55786892008-03-04 13:47:47 -08002964 dev = net->loopback_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965 dev_hold(dev);
2966 idev = in6_dev_get(dev);
2967 if (!idev) {
2968 err = -ENODEV;
2969 goto out;
2970 }
2971 }
David Ahern93c2fb22018-04-18 15:38:59 -07002972 rt->fib6_flags = RTF_REJECT|RTF_NONEXTHOP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973 goto install_route;
2974 }
2975
Thomas Graf86872cb2006-08-22 00:01:08 -07002976 if (cfg->fc_flags & RTF_GATEWAY) {
David Ahern9fbb7042018-03-13 08:29:36 -07002977 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
2978 if (err)
Florian Westphal48ed7b22015-05-21 00:25:41 +02002979 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980
David Ahern93531c62018-04-17 17:33:25 -07002981 rt->fib6_nh.nh_gw = cfg->fc_gateway;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 }
2983
2984 err = -ENODEV;
David S. Miller38308472011-12-03 18:02:47 -05002985 if (!dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 goto out;
2987
Lorenzo Bianconi428604f2018-03-29 11:02:24 +02002988 if (idev->cnf.disable_ipv6) {
2989 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
2990 err = -EACCES;
2991 goto out;
2992 }
2993
David Ahern955ec4c2018-01-24 19:45:29 -08002994 if (!(dev->flags & IFF_UP)) {
2995 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
2996 err = -ENETDOWN;
2997 goto out;
2998 }
2999
Daniel Walterc3968a82011-04-13 21:10:57 +00003000 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3001 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
David Ahernd5d531c2017-05-21 10:12:05 -06003002 NL_SET_ERR_MSG(extack, "Invalid source address");
Daniel Walterc3968a82011-04-13 21:10:57 +00003003 err = -EINVAL;
3004 goto out;
3005 }
David Ahern93c2fb22018-04-18 15:38:59 -07003006 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3007 rt->fib6_prefsrc.plen = 128;
Daniel Walterc3968a82011-04-13 21:10:57 +00003008 } else
David Ahern93c2fb22018-04-18 15:38:59 -07003009 rt->fib6_prefsrc.plen = 0;
Daniel Walterc3968a82011-04-13 21:10:57 +00003010
David Ahern93c2fb22018-04-18 15:38:59 -07003011 rt->fib6_flags = cfg->fc_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012
3013install_route:
David Ahern93c2fb22018-04-18 15:38:59 -07003014 if (!(rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
Ido Schimmel5609b802018-01-07 12:45:06 +02003015 !netif_carrier_ok(dev))
David Ahern5e670d82018-04-17 17:33:14 -07003016 rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
3017 rt->fib6_nh.nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK);
David Ahern93531c62018-04-17 17:33:25 -07003018 rt->fib6_nh.nh_dev = dev;
David Ahern93c2fb22018-04-18 15:38:59 -07003019 rt->fib6_table = table;
Daniel Lezcano63152fc2008-03-03 23:31:11 -08003020
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003021 cfg->fc_nlinfo.nl_net = dev_net(dev);
Daniel Lezcano63152fc2008-03-03 23:31:11 -08003022
David Aherndcd1f572018-04-18 15:39:05 -07003023 if (idev)
3024 in6_dev_put(idev);
3025
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07003026 return rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027out:
3028 if (dev)
3029 dev_put(dev);
3030 if (idev)
3031 in6_dev_put(idev);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003032
David Ahern93531c62018-04-17 17:33:25 -07003033 fib6_info_release(rt);
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07003034 return ERR_PTR(err);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003035}
3036
David Ahernacb54e32018-04-17 17:33:22 -07003037int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3038 struct netlink_ext_ack *extack)
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003039{
David Ahern8d1c8022018-04-17 17:33:26 -07003040 struct fib6_info *rt;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003041 int err;
3042
David Ahernacb54e32018-04-17 17:33:22 -07003043 rt = ip6_route_info_create(cfg, gfp_flags, extack);
David Ahernd4ead6b2018-04-17 17:33:16 -07003044 if (IS_ERR(rt))
3045 return PTR_ERR(rt);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003046
David Ahernd4ead6b2018-04-17 17:33:16 -07003047 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
David Ahern93531c62018-04-17 17:33:25 -07003048 fib6_info_release(rt);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003049
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050 return err;
3051}
3052
David Ahern8d1c8022018-04-17 17:33:26 -07003053static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054{
David Ahernafb1d4b52018-04-17 17:33:11 -07003055 struct net *net = info->nl_net;
Thomas Grafc71099a2006-08-04 23:20:06 -07003056 struct fib6_table *table;
David Ahernafb1d4b52018-04-17 17:33:11 -07003057 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058
David Ahern421842e2018-04-17 17:33:18 -07003059 if (rt == net->ipv6.fib6_null_entry) {
Gao feng6825a262012-09-19 19:25:34 +00003060 err = -ENOENT;
3061 goto out;
3062 }
Patrick McHardy6c813a72006-08-06 22:22:47 -07003063
David Ahern93c2fb22018-04-18 15:38:59 -07003064 table = rt->fib6_table;
Wei Wang66f5d6c2017-10-06 12:06:10 -07003065 spin_lock_bh(&table->tb6_lock);
Thomas Graf86872cb2006-08-22 00:01:08 -07003066 err = fib6_del(rt, info);
Wei Wang66f5d6c2017-10-06 12:06:10 -07003067 spin_unlock_bh(&table->tb6_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068
Gao feng6825a262012-09-19 19:25:34 +00003069out:
David Ahern93531c62018-04-17 17:33:25 -07003070 fib6_info_release(rt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071 return err;
3072}
3073
David Ahern8d1c8022018-04-17 17:33:26 -07003074int ip6_del_rt(struct net *net, struct fib6_info *rt)
Thomas Grafe0a1ad732006-08-22 00:00:21 -07003075{
David Ahernafb1d4b52018-04-17 17:33:11 -07003076 struct nl_info info = { .nl_net = net };
3077
Denis V. Lunev528c4ce2007-12-13 09:45:12 -08003078 return __ip6_del_rt(rt, &info);
Thomas Grafe0a1ad732006-08-22 00:00:21 -07003079}
3080
David Ahern8d1c8022018-04-17 17:33:26 -07003081static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
David Ahern0ae81332017-02-02 12:37:08 -08003082{
3083 struct nl_info *info = &cfg->fc_nlinfo;
WANG Conge3330032017-02-27 16:07:43 -08003084 struct net *net = info->nl_net;
David Ahern16a16cd2017-02-02 12:37:11 -08003085 struct sk_buff *skb = NULL;
David Ahern0ae81332017-02-02 12:37:08 -08003086 struct fib6_table *table;
WANG Conge3330032017-02-27 16:07:43 -08003087 int err = -ENOENT;
David Ahern0ae81332017-02-02 12:37:08 -08003088
David Ahern421842e2018-04-17 17:33:18 -07003089 if (rt == net->ipv6.fib6_null_entry)
WANG Conge3330032017-02-27 16:07:43 -08003090 goto out_put;
David Ahern93c2fb22018-04-18 15:38:59 -07003091 table = rt->fib6_table;
Wei Wang66f5d6c2017-10-06 12:06:10 -07003092 spin_lock_bh(&table->tb6_lock);
David Ahern0ae81332017-02-02 12:37:08 -08003093
David Ahern93c2fb22018-04-18 15:38:59 -07003094 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
David Ahern8d1c8022018-04-17 17:33:26 -07003095 struct fib6_info *sibling, *next_sibling;
David Ahern0ae81332017-02-02 12:37:08 -08003096
David Ahern16a16cd2017-02-02 12:37:11 -08003097 /* prefer to send a single notification with all hops */
3098 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3099 if (skb) {
3100 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3101
David Ahernd4ead6b2018-04-17 17:33:16 -07003102 if (rt6_fill_node(net, skb, rt, NULL,
David Ahern16a16cd2017-02-02 12:37:11 -08003103 NULL, NULL, 0, RTM_DELROUTE,
3104 info->portid, seq, 0) < 0) {
3105 kfree_skb(skb);
3106 skb = NULL;
3107 } else
3108 info->skip_notify = 1;
3109 }
3110
David Ahern0ae81332017-02-02 12:37:08 -08003111 list_for_each_entry_safe(sibling, next_sibling,
David Ahern93c2fb22018-04-18 15:38:59 -07003112 &rt->fib6_siblings,
3113 fib6_siblings) {
David Ahern0ae81332017-02-02 12:37:08 -08003114 err = fib6_del(sibling, info);
3115 if (err)
WANG Conge3330032017-02-27 16:07:43 -08003116 goto out_unlock;
David Ahern0ae81332017-02-02 12:37:08 -08003117 }
3118 }
3119
3120 err = fib6_del(rt, info);
WANG Conge3330032017-02-27 16:07:43 -08003121out_unlock:
Wei Wang66f5d6c2017-10-06 12:06:10 -07003122 spin_unlock_bh(&table->tb6_lock);
WANG Conge3330032017-02-27 16:07:43 -08003123out_put:
David Ahern93531c62018-04-17 17:33:25 -07003124 fib6_info_release(rt);
David Ahern16a16cd2017-02-02 12:37:11 -08003125
3126 if (skb) {
WANG Conge3330032017-02-27 16:07:43 -08003127 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
David Ahern16a16cd2017-02-02 12:37:11 -08003128 info->nlh, gfp_any());
3129 }
David Ahern0ae81332017-02-02 12:37:08 -08003130 return err;
3131}
3132
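/* Delete a route from the exception (cached) table, but only if it matches
 * the device and gateway constraints from the request; -ESRCH means this
 * cached entry was not the one being asked for.
 */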
David Ahern23fb93a2018-04-17 17:33:23 -07003133static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3134{
3135 int rc = -ESRCH;
3136
3137 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3138 goto out;
3139
3140 if (cfg->fc_flags & RTF_GATEWAY &&
3141 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3142 goto out;
3143 if (dst_hold_safe(&rt->dst))
3144 rc = rt6_remove_exception_rt(rt);
3145out:
3146 return rc;
3147}
3148
David Ahern333c4302017-05-21 10:12:04 -06003149static int ip6_route_del(struct fib6_config *cfg,
3150 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151{
David Ahern8d1c8022018-04-17 17:33:26 -07003152 struct rt6_info *rt_cache;
Thomas Grafc71099a2006-08-04 23:20:06 -07003153 struct fib6_table *table;
David Ahern8d1c8022018-04-17 17:33:26 -07003154 struct fib6_info *rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155 struct fib6_node *fn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156 int err = -ESRCH;
3157
Daniel Lezcano55786892008-03-04 13:47:47 -08003158 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
David Ahernd5d531c2017-05-21 10:12:05 -06003159 if (!table) {
3160 NL_SET_ERR_MSG(extack, "FIB table does not exist");
Thomas Grafc71099a2006-08-04 23:20:06 -07003161 return err;
David Ahernd5d531c2017-05-21 10:12:05 -06003162 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163
Wei Wang66f5d6c2017-10-06 12:06:10 -07003164 rcu_read_lock();
Thomas Grafc71099a2006-08-04 23:20:06 -07003165
3166 fn = fib6_locate(&table->tb6_root,
Thomas Graf86872cb2006-08-22 00:01:08 -07003167 &cfg->fc_dst, cfg->fc_dst_len,
Wei Wang38fbeee2017-10-06 12:06:02 -07003168 &cfg->fc_src, cfg->fc_src_len,
Wei Wang2b760fc2017-10-06 12:06:03 -07003169 !(cfg->fc_flags & RTF_CACHE));
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09003170
Linus Torvalds1da177e2005-04-16 15:20:36 -07003171 if (fn) {
Wei Wang66f5d6c2017-10-06 12:06:10 -07003172 for_each_fib6_node_rt_rcu(fn) {
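			/* An RTF_CACHE delete targets an entry in this
			 * route's exception table, not the FIB entry itself.
			 */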
Wei Wang2b760fc2017-10-06 12:06:03 -07003173 if (cfg->fc_flags & RTF_CACHE) {
David Ahern23fb93a2018-04-17 17:33:23 -07003174 int rc;
3175
Wei Wang2b760fc2017-10-06 12:06:03 -07003176 rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst,
3177 &cfg->fc_src);
David Ahern23fb93a2018-04-17 17:33:23 -07003178 if (rt_cache) {
3179 rc = ip6_del_cached_rt(rt_cache, cfg);
3180 if (rc != -ESRCH)
3181 return rc;
3182 }
3183 continue;
Wei Wang2b760fc2017-10-06 12:06:03 -07003184 }
Thomas Graf86872cb2006-08-22 00:01:08 -07003185 if (cfg->fc_ifindex &&
David Ahern5e670d82018-04-17 17:33:14 -07003186 (!rt->fib6_nh.nh_dev ||
3187 rt->fib6_nh.nh_dev->ifindex != cfg->fc_ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003188 continue;
Thomas Graf86872cb2006-08-22 00:01:08 -07003189 if (cfg->fc_flags & RTF_GATEWAY &&
David Ahern5e670d82018-04-17 17:33:14 -07003190 !ipv6_addr_equal(&cfg->fc_gateway, &rt->fib6_nh.nh_gw))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003191 continue;
David Ahern93c2fb22018-04-18 15:38:59 -07003192 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193 continue;
David Ahern93c2fb22018-04-18 15:38:59 -07003194 if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
Mantas Mc2ed1882016-12-16 10:30:59 +02003195 continue;
David Ahern93531c62018-04-17 17:33:25 -07003196 fib6_info_hold(rt);
Wei Wang66f5d6c2017-10-06 12:06:10 -07003197 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198
David Ahern0ae81332017-02-02 12:37:08 -08003199			/* if a gateway was specified, only delete the one hop */
3200 if (cfg->fc_flags & RTF_GATEWAY)
3201 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3202
3203 return __ip6_del_rt_siblings(rt, cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204 }
3205 }
Wei Wang66f5d6c2017-10-06 12:06:10 -07003206 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207
3208 return err;
3209}
3210
David S. Miller6700c272012-07-17 03:29:28 -07003211static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
YOSHIFUJI Hideakia6279452006-08-23 17:18:26 -07003212{
YOSHIFUJI Hideakia6279452006-08-23 17:18:26 -07003213 struct netevent_redirect netevent;
David S. Millere8599ff2012-07-11 23:43:53 -07003214 struct rt6_info *rt, *nrt = NULL;
David S. Millere8599ff2012-07-11 23:43:53 -07003215 struct ndisc_options ndopts;
3216 struct inet6_dev *in6_dev;
3217 struct neighbour *neigh;
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003218 struct rd_msg *msg;
David S. Miller6e157b62012-07-12 00:05:02 -07003219 int optlen, on_link;
3220 u8 *lladdr;
David S. Millere8599ff2012-07-11 23:43:53 -07003221
Simon Horman29a3cad2013-05-28 20:34:26 +00003222 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003223 optlen -= sizeof(*msg);
David S. Millere8599ff2012-07-11 23:43:53 -07003224
3225 if (optlen < 0) {
David S. Miller6e157b62012-07-12 00:05:02 -07003226 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
David S. Millere8599ff2012-07-11 23:43:53 -07003227 return;
3228 }
3229
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003230 msg = (struct rd_msg *)icmp6_hdr(skb);
David S. Millere8599ff2012-07-11 23:43:53 -07003231
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003232 if (ipv6_addr_is_multicast(&msg->dest)) {
David S. Miller6e157b62012-07-12 00:05:02 -07003233 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
David S. Millere8599ff2012-07-11 23:43:53 -07003234 return;
3235 }
3236
David S. Miller6e157b62012-07-12 00:05:02 -07003237 on_link = 0;
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003238 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
David S. Millere8599ff2012-07-11 23:43:53 -07003239 on_link = 1;
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003240 } else if (ipv6_addr_type(&msg->target) !=
David S. Millere8599ff2012-07-11 23:43:53 -07003241 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
David S. Miller6e157b62012-07-12 00:05:02 -07003242 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
David S. Millere8599ff2012-07-11 23:43:53 -07003243 return;
3244 }
3245
3246 in6_dev = __in6_dev_get(skb->dev);
3247 if (!in6_dev)
3248 return;
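	/* Redirects are only honoured on hosts: ignore them when forwarding
	 * is enabled or accept_redirects is off for this interface.
	 */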
3249 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
3250 return;
3251
3252 /* RFC2461 8.1:
3253 * The IP source address of the Redirect MUST be the same as the current
3254 * first-hop router for the specified ICMP Destination Address.
3255 */
3256
Alexander Aringf997c552016-06-15 21:20:23 +02003257 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
David S. Millere8599ff2012-07-11 23:43:53 -07003258 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
3259 return;
3260 }
David S. Miller6e157b62012-07-12 00:05:02 -07003261
3262 lladdr = NULL;
David S. Millere8599ff2012-07-11 23:43:53 -07003263 if (ndopts.nd_opts_tgt_lladdr) {
3264 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
3265 skb->dev);
3266 if (!lladdr) {
3267 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
3268 return;
3269 }
3270 }
3271
David S. Miller6e157b62012-07-12 00:05:02 -07003272 rt = (struct rt6_info *) dst;
Matthias Schifferec13ad12015-11-02 01:24:38 +01003273 if (rt->rt6i_flags & RTF_REJECT) {
David S. Miller6e157b62012-07-12 00:05:02 -07003274 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
3275 return;
3276 }
3277
3278 /* Redirect received -> path was valid.
3279 * Look, redirects are sent only in response to data packets,
3280 * so that this nexthop apparently is reachable. --ANK
3281 */
Julian Anastasov0dec8792017-02-06 23:14:16 +02003282 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
David S. Miller6e157b62012-07-12 00:05:02 -07003283
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003284 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
David S. Millere8599ff2012-07-11 23:43:53 -07003285 if (!neigh)
3286 return;
3287
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288 /*
3289 * We have finally decided to accept it.
3290 */
3291
Alexander Aringf997c552016-06-15 21:20:23 +02003292 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293 NEIGH_UPDATE_F_WEAK_OVERRIDE|
3294 NEIGH_UPDATE_F_OVERRIDE|
3295 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
Alexander Aringf997c552016-06-15 21:20:23 +02003296 NEIGH_UPDATE_F_ISROUTER)),
3297 NDISC_REDIRECT, &ndopts);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003298
David Ahern4d85cd02018-04-20 15:37:59 -07003299 rcu_read_lock();
David Ahern23fb93a2018-04-17 17:33:23 -07003300 nrt = ip6_rt_cache_alloc(rt->from, &msg->dest, NULL);
David Ahern4d85cd02018-04-20 15:37:59 -07003301 rcu_read_unlock();
David S. Miller38308472011-12-03 18:02:47 -05003302 if (!nrt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003303 goto out;
3304
3305 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
3306 if (on_link)
3307 nrt->rt6i_flags &= ~RTF_GATEWAY;
3308
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00003309 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310
Wei Wang2b760fc2017-10-06 12:06:03 -07003311 /* No need to remove rt from the exception table if rt is
3312 * a cached route because rt6_insert_exception() will
 3313	 * take care of it
3314 */
David Ahernd4ead6b2018-04-17 17:33:16 -07003315 if (rt6_insert_exception(nrt, rt->from)) {
Wei Wang2b760fc2017-10-06 12:06:03 -07003316 dst_release_immediate(&nrt->dst);
3317 goto out;
3318 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319
Changli Gaod8d1f302010-06-10 23:31:35 -07003320 netevent.old = &rt->dst;
3321 netevent.new = &nrt->dst;
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003322 netevent.daddr = &msg->dest;
YOSHIFUJI Hideaki / 吉藤英明60592832013-01-14 09:28:27 +00003323 netevent.neigh = neigh;
Tom Tucker8d717402006-07-30 20:43:36 -07003324 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
3325
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326out:
David S. Millere8599ff2012-07-11 23:43:53 -07003327 neigh_release(neigh);
David S. Miller6e157b62012-07-12 00:05:02 -07003328}
3329
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003330#ifdef CONFIG_IPV6_ROUTE_INFO
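/* Find a route that was installed from an RA Route Information option
 * (RFC 4191): match on prefix, gateway and the device the RA arrived on.
 */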
David Ahern8d1c8022018-04-17 17:33:26 -07003331static struct fib6_info *rt6_get_route_info(struct net *net,
Eric Dumazetb71d1d42011-04-22 04:53:02 +00003332 const struct in6_addr *prefix, int prefixlen,
David Ahern830218c2016-10-24 10:52:35 -07003333 const struct in6_addr *gwaddr,
3334 struct net_device *dev)
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003335{
David Ahern830218c2016-10-24 10:52:35 -07003336 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3337 int ifindex = dev->ifindex;
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003338 struct fib6_node *fn;
David Ahern8d1c8022018-04-17 17:33:26 -07003339 struct fib6_info *rt = NULL;
Thomas Grafc71099a2006-08-04 23:20:06 -07003340 struct fib6_table *table;
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003341
David Ahern830218c2016-10-24 10:52:35 -07003342 table = fib6_get_table(net, tb_id);
David S. Miller38308472011-12-03 18:02:47 -05003343 if (!table)
Thomas Grafc71099a2006-08-04 23:20:06 -07003344 return NULL;
3345
Wei Wang66f5d6c2017-10-06 12:06:10 -07003346 rcu_read_lock();
Wei Wang38fbeee2017-10-06 12:06:02 -07003347 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003348 if (!fn)
3349 goto out;
3350
Wei Wang66f5d6c2017-10-06 12:06:10 -07003351 for_each_fib6_node_rt_rcu(fn) {
David Ahern5e670d82018-04-17 17:33:14 -07003352 if (rt->fib6_nh.nh_dev->ifindex != ifindex)
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003353 continue;
David Ahern93c2fb22018-04-18 15:38:59 -07003354 if ((rt->fib6_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003355 continue;
David Ahern5e670d82018-04-17 17:33:14 -07003356 if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003357 continue;
David Ahern8d1c8022018-04-17 17:33:26 -07003358 fib6_info_hold(rt);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003359 break;
3360 }
3361out:
Wei Wang66f5d6c2017-10-06 12:06:10 -07003362 rcu_read_unlock();
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003363 return rt;
3364}
3365
David Ahern8d1c8022018-04-17 17:33:26 -07003366static struct fib6_info *rt6_add_route_info(struct net *net,
Eric Dumazetb71d1d42011-04-22 04:53:02 +00003367 const struct in6_addr *prefix, int prefixlen,
David Ahern830218c2016-10-24 10:52:35 -07003368 const struct in6_addr *gwaddr,
3369 struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +00003370 unsigned int pref)
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003371{
Thomas Graf86872cb2006-08-22 00:01:08 -07003372 struct fib6_config cfg = {
Rami Rosen238fc7e2008-02-09 23:43:11 -08003373 .fc_metric = IP6_RT_PRIO_USER,
David Ahern830218c2016-10-24 10:52:35 -07003374 .fc_ifindex = dev->ifindex,
Thomas Graf86872cb2006-08-22 00:01:08 -07003375 .fc_dst_len = prefixlen,
3376 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
3377 RTF_UP | RTF_PREF(pref),
Xin Longb91d5322017-08-03 14:13:46 +08003378 .fc_protocol = RTPROT_RA,
David Aherne8478e82018-04-17 17:33:13 -07003379 .fc_type = RTN_UNICAST,
Eric W. Biederman15e47302012-09-07 20:12:54 +00003380 .fc_nlinfo.portid = 0,
Daniel Lezcanoefa2cea2008-03-04 13:46:48 -08003381 .fc_nlinfo.nlh = NULL,
3382 .fc_nlinfo.nl_net = net,
Thomas Graf86872cb2006-08-22 00:01:08 -07003383 };
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003384
David Ahern830218c2016-10-24 10:52:35 -07003385 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00003386 cfg.fc_dst = *prefix;
3387 cfg.fc_gateway = *gwaddr;
Thomas Graf86872cb2006-08-22 00:01:08 -07003388
YOSHIFUJI Hideakie317da92006-03-20 17:06:42 -08003389 /* We should treat it as a default route if prefix length is 0. */
3390 if (!prefixlen)
Thomas Graf86872cb2006-08-22 00:01:08 -07003391 cfg.fc_flags |= RTF_DEFAULT;
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003392
David Ahernacb54e32018-04-17 17:33:22 -07003393 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003394
David Ahern830218c2016-10-24 10:52:35 -07003395 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003396}
3397#endif
3398
David Ahern8d1c8022018-04-17 17:33:26 -07003399struct fib6_info *rt6_get_dflt_router(struct net *net,
David Ahernafb1d4b52018-04-17 17:33:11 -07003400 const struct in6_addr *addr,
3401 struct net_device *dev)
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09003402{
David Ahern830218c2016-10-24 10:52:35 -07003403 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
David Ahern8d1c8022018-04-17 17:33:26 -07003404 struct fib6_info *rt;
Thomas Grafc71099a2006-08-04 23:20:06 -07003405 struct fib6_table *table;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406
David Ahernafb1d4b52018-04-17 17:33:11 -07003407 table = fib6_get_table(net, tb_id);
David S. Miller38308472011-12-03 18:02:47 -05003408 if (!table)
Thomas Grafc71099a2006-08-04 23:20:06 -07003409 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410
Wei Wang66f5d6c2017-10-06 12:06:10 -07003411 rcu_read_lock();
3412 for_each_fib6_node_rt_rcu(&table->tb6_root) {
David Ahern5e670d82018-04-17 17:33:14 -07003413 if (dev == rt->fib6_nh.nh_dev &&
David Ahern93c2fb22018-04-18 15:38:59 -07003414 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
David Ahern5e670d82018-04-17 17:33:14 -07003415 ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416 break;
3417 }
3418 if (rt)
David Ahern8d1c8022018-04-17 17:33:26 -07003419 fib6_info_hold(rt);
Wei Wang66f5d6c2017-10-06 12:06:10 -07003420 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421 return rt;
3422}
3423
David Ahern8d1c8022018-04-17 17:33:26 -07003424struct fib6_info *rt6_add_dflt_router(struct net *net,
David Ahernafb1d4b52018-04-17 17:33:11 -07003425 const struct in6_addr *gwaddr,
YOSHIFUJI Hideakiebacaaa2006-03-20 17:04:53 -08003426 struct net_device *dev,
3427 unsigned int pref)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428{
Thomas Graf86872cb2006-08-22 00:01:08 -07003429 struct fib6_config cfg = {
David Ahernca254492015-10-12 11:47:10 -07003430 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
Rami Rosen238fc7e2008-02-09 23:43:11 -08003431 .fc_metric = IP6_RT_PRIO_USER,
Thomas Graf86872cb2006-08-22 00:01:08 -07003432 .fc_ifindex = dev->ifindex,
3433 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
3434 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
Xin Longb91d5322017-08-03 14:13:46 +08003435 .fc_protocol = RTPROT_RA,
David Aherne8478e82018-04-17 17:33:13 -07003436 .fc_type = RTN_UNICAST,
Eric W. Biederman15e47302012-09-07 20:12:54 +00003437 .fc_nlinfo.portid = 0,
Daniel Lezcano55786892008-03-04 13:47:47 -08003438 .fc_nlinfo.nlh = NULL,
David Ahernafb1d4b52018-04-17 17:33:11 -07003439 .fc_nlinfo.nl_net = net,
Thomas Graf86872cb2006-08-22 00:01:08 -07003440 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00003442 cfg.fc_gateway = *gwaddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443
David Ahernacb54e32018-04-17 17:33:22 -07003444 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
David Ahern830218c2016-10-24 10:52:35 -07003445 struct fib6_table *table;
3446
3447 table = fib6_get_table(dev_net(dev), cfg.fc_table);
3448 if (table)
3449 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
3450 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003451
David Ahernafb1d4b52018-04-17 17:33:11 -07003452 return rt6_get_dflt_router(net, gwaddr, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453}
3454
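/* Drop RA-learned default routes from one table; entries whose interface
 * is configured with accept_ra == 2 (accept RAs even when forwarding) are
 * kept.
 */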
David Ahernafb1d4b52018-04-17 17:33:11 -07003455static void __rt6_purge_dflt_routers(struct net *net,
3456 struct fib6_table *table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457{
David Ahern8d1c8022018-04-17 17:33:26 -07003458 struct fib6_info *rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459
3460restart:
Wei Wang66f5d6c2017-10-06 12:06:10 -07003461 rcu_read_lock();
3462 for_each_fib6_node_rt_rcu(&table->tb6_root) {
David Aherndcd1f572018-04-18 15:39:05 -07003463 struct net_device *dev = fib6_info_nh_dev(rt);
3464 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
3465
David Ahern93c2fb22018-04-18 15:38:59 -07003466 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
David Aherndcd1f572018-04-18 15:39:05 -07003467 (!idev || idev->cnf.accept_ra != 2)) {
David Ahern93531c62018-04-17 17:33:25 -07003468 fib6_info_hold(rt);
3469 rcu_read_unlock();
3470 ip6_del_rt(net, rt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 goto restart;
3472 }
3473 }
Wei Wang66f5d6c2017-10-06 12:06:10 -07003474 rcu_read_unlock();
David Ahern830218c2016-10-24 10:52:35 -07003475
3476 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
3477}
3478
3479void rt6_purge_dflt_routers(struct net *net)
3480{
3481 struct fib6_table *table;
3482 struct hlist_head *head;
3483 unsigned int h;
3484
3485 rcu_read_lock();
3486
3487 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
3488 head = &net->ipv6.fib_table_hash[h];
3489 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
3490 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
David Ahernafb1d4b52018-04-17 17:33:11 -07003491 __rt6_purge_dflt_routers(net, table);
David Ahern830218c2016-10-24 10:52:35 -07003492 }
3493 }
3494
3495 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496}
3497
Daniel Lezcano55786892008-03-04 13:47:47 -08003498static void rtmsg_to_fib6_config(struct net *net,
3499 struct in6_rtmsg *rtmsg,
Thomas Graf86872cb2006-08-22 00:01:08 -07003500 struct fib6_config *cfg)
3501{
3502 memset(cfg, 0, sizeof(*cfg));
3503
David Ahernca254492015-10-12 11:47:10 -07003504 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
3505 : RT6_TABLE_MAIN;
Thomas Graf86872cb2006-08-22 00:01:08 -07003506 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
3507 cfg->fc_metric = rtmsg->rtmsg_metric;
3508 cfg->fc_expires = rtmsg->rtmsg_info;
3509 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
3510 cfg->fc_src_len = rtmsg->rtmsg_src_len;
3511 cfg->fc_flags = rtmsg->rtmsg_flags;
David Aherne8478e82018-04-17 17:33:13 -07003512 cfg->fc_type = rtmsg->rtmsg_type;
Thomas Graf86872cb2006-08-22 00:01:08 -07003513
Daniel Lezcano55786892008-03-04 13:47:47 -08003514 cfg->fc_nlinfo.nl_net = net;
Benjamin Theryf1243c22008-02-26 18:10:03 -08003515
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00003516 cfg->fc_dst = rtmsg->rtmsg_dst;
3517 cfg->fc_src = rtmsg->rtmsg_src;
3518 cfg->fc_gateway = rtmsg->rtmsg_gateway;
Thomas Graf86872cb2006-08-22 00:01:08 -07003519}
3520
Daniel Lezcano55786892008-03-04 13:47:47 -08003521int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522{
Thomas Graf86872cb2006-08-22 00:01:08 -07003523 struct fib6_config cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 struct in6_rtmsg rtmsg;
3525 int err;
3526
Ian Morris67ba4152014-08-24 21:53:10 +01003527 switch (cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528 case SIOCADDRT: /* Add a route */
3529 case SIOCDELRT: /* Delete a route */
Eric W. Biedermanaf31f412012-11-16 03:03:06 +00003530 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003531 return -EPERM;
3532 err = copy_from_user(&rtmsg, arg,
3533 sizeof(struct in6_rtmsg));
3534 if (err)
3535 return -EFAULT;
Thomas Graf86872cb2006-08-22 00:01:08 -07003536
Daniel Lezcano55786892008-03-04 13:47:47 -08003537 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
Thomas Graf86872cb2006-08-22 00:01:08 -07003538
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539 rtnl_lock();
3540 switch (cmd) {
3541 case SIOCADDRT:
David Ahernacb54e32018-04-17 17:33:22 -07003542 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543 break;
3544 case SIOCDELRT:
David Ahern333c4302017-05-21 10:12:04 -06003545 err = ip6_route_del(&cfg, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 break;
3547 default:
3548 err = -EINVAL;
3549 }
3550 rtnl_unlock();
3551
3552 return err;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003553 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003554
3555 return -EINVAL;
3556}
3557
3558/*
3559 * Drop the packet on the floor
3560 */
3561
Brian Haleyd5fdd6b2009-06-23 04:31:07 -07003562static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563{
YOSHIFUJI Hideaki612f09e2007-04-13 16:18:02 -07003564 int type;
Eric Dumazetadf30902009-06-02 05:19:30 +00003565 struct dst_entry *dst = skb_dst(skb);
YOSHIFUJI Hideaki612f09e2007-04-13 16:18:02 -07003566 switch (ipstats_mib_noroutes) {
3567 case IPSTATS_MIB_INNOROUTES:
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07003568 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
Ulrich Weber45bb0062010-02-25 23:28:58 +00003569 if (type == IPV6_ADDR_ANY) {
Stephen Suryaputrabdb7cc62018-04-16 13:42:16 -04003570 IP6_INC_STATS(dev_net(dst->dev),
3571 __in6_dev_get_safely(skb->dev),
Denis V. Lunev3bd653c2008-10-08 10:54:51 -07003572 IPSTATS_MIB_INADDRERRORS);
YOSHIFUJI Hideaki612f09e2007-04-13 16:18:02 -07003573 break;
3574 }
3575 /* FALLTHROUGH */
3576 case IPSTATS_MIB_OUTNOROUTES:
Denis V. Lunev3bd653c2008-10-08 10:54:51 -07003577 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
3578 ipstats_mib_noroutes);
YOSHIFUJI Hideaki612f09e2007-04-13 16:18:02 -07003579 break;
3580 }
Alexey Dobriyan3ffe5332010-02-18 08:25:24 +00003581 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003582 kfree_skb(skb);
3583 return 0;
3584}
3585
Thomas Graf9ce8ade2006-10-18 20:46:54 -07003586static int ip6_pkt_discard(struct sk_buff *skb)
3587{
YOSHIFUJI Hideaki612f09e2007-04-13 16:18:02 -07003588 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
Thomas Graf9ce8ade2006-10-18 20:46:54 -07003589}
3590
Eric W. Biedermanede20592015-10-07 16:48:47 -05003591static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592{
Eric Dumazetadf30902009-06-02 05:19:30 +00003593 skb->dev = skb_dst(skb)->dev;
YOSHIFUJI Hideaki612f09e2007-04-13 16:18:02 -07003594 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595}
3596
Thomas Graf9ce8ade2006-10-18 20:46:54 -07003597static int ip6_pkt_prohibit(struct sk_buff *skb)
3598{
YOSHIFUJI Hideaki612f09e2007-04-13 16:18:02 -07003599 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
Thomas Graf9ce8ade2006-10-18 20:46:54 -07003600}
3601
Eric W. Biedermanede20592015-10-07 16:48:47 -05003602static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
Thomas Graf9ce8ade2006-10-18 20:46:54 -07003603{
Eric Dumazetadf30902009-06-02 05:19:30 +00003604 skb->dev = skb_dst(skb)->dev;
YOSHIFUJI Hideaki612f09e2007-04-13 16:18:02 -07003605 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
Thomas Graf9ce8ade2006-10-18 20:46:54 -07003606}
3607
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608/*
 3609 * Allocate a fib6_info for a local (unicast / anycast) address.
3610 */
3611
David Ahern360a9882018-04-18 15:39:00 -07003612struct fib6_info *addrconf_f6i_alloc(struct net *net,
3613 struct inet6_dev *idev,
3614 const struct in6_addr *addr,
3615 bool anycast, gfp_t gfp_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616{
David Ahernca254492015-10-12 11:47:10 -07003617 u32 tb_id;
David Ahern4832c302017-08-17 12:17:20 -07003618 struct net_device *dev = idev->dev;
David Ahern360a9882018-04-18 15:39:00 -07003619 struct fib6_info *f6i;
David Ahern5f02ce242016-09-10 12:09:54 -07003620
David Ahern360a9882018-04-18 15:39:00 -07003621 f6i = fib6_info_alloc(gfp_flags);
3622 if (!f6i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623 return ERR_PTR(-ENOMEM);
3624
David Ahern360a9882018-04-18 15:39:00 -07003625 f6i->dst_nocount = true;
David Ahern360a9882018-04-18 15:39:00 -07003626 f6i->dst_host = true;
3627 f6i->fib6_protocol = RTPROT_KERNEL;
3628 f6i->fib6_flags = RTF_UP | RTF_NONEXTHOP;
David Aherne8478e82018-04-17 17:33:13 -07003629 if (anycast) {
David Ahern360a9882018-04-18 15:39:00 -07003630 f6i->fib6_type = RTN_ANYCAST;
3631 f6i->fib6_flags |= RTF_ANYCAST;
David Aherne8478e82018-04-17 17:33:13 -07003632 } else {
David Ahern360a9882018-04-18 15:39:00 -07003633 f6i->fib6_type = RTN_LOCAL;
3634 f6i->fib6_flags |= RTF_LOCAL;
David Aherne8478e82018-04-17 17:33:13 -07003635 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636
David Ahern360a9882018-04-18 15:39:00 -07003637 f6i->fib6_nh.nh_gw = *addr;
David Ahern93531c62018-04-17 17:33:25 -07003638 dev_hold(dev);
David Ahern360a9882018-04-18 15:39:00 -07003639 f6i->fib6_nh.nh_dev = dev;
3640 f6i->fib6_dst.addr = *addr;
3641 f6i->fib6_dst.plen = 128;
David Ahernca254492015-10-12 11:47:10 -07003642 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
David Ahern360a9882018-04-18 15:39:00 -07003643 f6i->fib6_table = fib6_get_table(net, tb_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003644
David Ahern360a9882018-04-18 15:39:00 -07003645 return f6i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003646}
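/* addrconf uses the helper above to create the host route (/128) that
 * makes a newly configured unicast or anycast address locally
 * deliverable: type RTN_LOCAL or RTN_ANYCAST, flags RTF_UP |
 * RTF_NONEXTHOP plus RTF_LOCAL or RTF_ANYCAST, inserted into the
 * device's l3mdev table or RT6_TABLE_LOCAL.  These are the entries shown
 * by "ip -6 route show table local" (illustrative iproute2 command).
 */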
3647
Daniel Walterc3968a82011-04-13 21:10:57 +00003648/* remove deleted ip from prefsrc entries */
3649struct arg_dev_net_ip {
3650 struct net_device *dev;
3651 struct net *net;
3652 struct in6_addr *addr;
3653};
3654
David Ahern8d1c8022018-04-17 17:33:26 -07003655static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
Daniel Walterc3968a82011-04-13 21:10:57 +00003656{
3657 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
3658 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
3659 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
3660
David Ahern5e670d82018-04-17 17:33:14 -07003661 if (((void *)rt->fib6_nh.nh_dev == dev || !dev) &&
David Ahern421842e2018-04-17 17:33:18 -07003662 rt != net->ipv6.fib6_null_entry &&
David Ahern93c2fb22018-04-18 15:38:59 -07003663 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
Wei Wang60006a42017-10-06 12:05:58 -07003664 spin_lock_bh(&rt6_exception_lock);
Daniel Walterc3968a82011-04-13 21:10:57 +00003665 /* remove prefsrc entry */
David Ahern93c2fb22018-04-18 15:38:59 -07003666 rt->fib6_prefsrc.plen = 0;
Wei Wang60006a42017-10-06 12:05:58 -07003667 /* need to update cache as well */
3668 rt6_exceptions_remove_prefsrc(rt);
3669 spin_unlock_bh(&rt6_exception_lock);
Daniel Walterc3968a82011-04-13 21:10:57 +00003670 }
3671 return 0;
3672}
3673
3674void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
3675{
3676 struct net *net = dev_net(ifp->idev->dev);
3677 struct arg_dev_net_ip adni = {
3678 .dev = ifp->idev->dev,
3679 .net = net,
3680 .addr = &ifp->addr,
3681 };
Li RongQing0c3584d2013-12-27 16:32:38 +08003682 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
Daniel Walterc3968a82011-04-13 21:10:57 +00003683}
3684
Duan Jiongbe7a0102014-05-15 15:56:14 +08003685#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
Duan Jiongbe7a0102014-05-15 15:56:14 +08003686
 3687/* Remove routers and update dst entries when the gateway turns into a host. */
David Ahern8d1c8022018-04-17 17:33:26 -07003688static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
Duan Jiongbe7a0102014-05-15 15:56:14 +08003689{
3690 struct in6_addr *gateway = (struct in6_addr *)arg;
3691
David Ahern93c2fb22018-04-18 15:38:59 -07003692 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
David Ahern5e670d82018-04-17 17:33:14 -07003693 ipv6_addr_equal(gateway, &rt->fib6_nh.nh_gw)) {
Duan Jiongbe7a0102014-05-15 15:56:14 +08003694 return -1;
3695 }
Wei Wangb16cb452017-10-06 12:06:00 -07003696
3697 /* Further clean up cached routes in exception table.
 3698	 * This is needed because a cached route may have a different
 3699	 * gateway than its 'parent' in the case of an ICMPv6 redirect.
3700 */
3701 rt6_exceptions_clean_tohost(rt, gateway);
3702
Duan Jiongbe7a0102014-05-15 15:56:14 +08003703 return 0;
3704}
3705
3706void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
3707{
3708 fib6_clean_all(net, fib6_clean_tohost, gateway);
3709}
3710
Ido Schimmel2127d952018-01-07 12:45:03 +02003711struct arg_netdev_event {
3712 const struct net_device *dev;
Ido Schimmel4c981e22018-01-07 12:45:04 +02003713 union {
3714 unsigned int nh_flags;
3715 unsigned long event;
3716 };
Ido Schimmel2127d952018-01-07 12:45:03 +02003717};
3718
David Ahern8d1c8022018-04-17 17:33:26 -07003719static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003720{
David Ahern8d1c8022018-04-17 17:33:26 -07003721 struct fib6_info *iter;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003722 struct fib6_node *fn;
3723
David Ahern93c2fb22018-04-18 15:38:59 -07003724 fn = rcu_dereference_protected(rt->fib6_node,
3725 lockdep_is_held(&rt->fib6_table->tb6_lock));
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003726 iter = rcu_dereference_protected(fn->leaf,
David Ahern93c2fb22018-04-18 15:38:59 -07003727 lockdep_is_held(&rt->fib6_table->tb6_lock));
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003728 while (iter) {
David Ahern93c2fb22018-04-18 15:38:59 -07003729 if (iter->fib6_metric == rt->fib6_metric &&
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003730 rt6_qualify_for_ecmp(iter))
3731 return iter;
3732 iter = rcu_dereference_protected(iter->rt6_next,
David Ahern93c2fb22018-04-18 15:38:59 -07003733 lockdep_is_held(&rt->fib6_table->tb6_lock));
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003734 }
3735
3736 return NULL;
3737}
3738
David Ahern8d1c8022018-04-17 17:33:26 -07003739static bool rt6_is_dead(const struct fib6_info *rt)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003740{
David Ahern5e670d82018-04-17 17:33:14 -07003741 if (rt->fib6_nh.nh_flags & RTNH_F_DEAD ||
3742 (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
David Aherndcd1f572018-04-18 15:39:05 -07003743 fib6_ignore_linkdown(rt)))
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003744 return true;
3745
3746 return false;
3747}
3748
David Ahern8d1c8022018-04-17 17:33:26 -07003749static int rt6_multipath_total_weight(const struct fib6_info *rt)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003750{
David Ahern8d1c8022018-04-17 17:33:26 -07003751 struct fib6_info *iter;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003752 int total = 0;
3753
3754 if (!rt6_is_dead(rt))
David Ahern5e670d82018-04-17 17:33:14 -07003755 total += rt->fib6_nh.nh_weight;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003756
David Ahern93c2fb22018-04-18 15:38:59 -07003757 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003758 if (!rt6_is_dead(iter))
David Ahern5e670d82018-04-17 17:33:14 -07003759 total += iter->fib6_nh.nh_weight;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003760 }
3761
3762 return total;
3763}
3764
David Ahern8d1c8022018-04-17 17:33:26 -07003765static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003766{
3767 int upper_bound = -1;
3768
3769 if (!rt6_is_dead(rt)) {
David Ahern5e670d82018-04-17 17:33:14 -07003770 *weight += rt->fib6_nh.nh_weight;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003771 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
3772 total) - 1;
3773 }
David Ahern5e670d82018-04-17 17:33:14 -07003774 atomic_set(&rt->fib6_nh.nh_upper_bound, upper_bound);
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003775}
3776
David Ahern8d1c8022018-04-17 17:33:26 -07003777static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003778{
David Ahern8d1c8022018-04-17 17:33:26 -07003779 struct fib6_info *iter;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003780 int weight = 0;
3781
3782 rt6_upper_bound_set(rt, &weight, total);
3783
David Ahern93c2fb22018-04-18 15:38:59 -07003784 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003785 rt6_upper_bound_set(iter, &weight, total);
3786}
3787
David Ahern8d1c8022018-04-17 17:33:26 -07003788void rt6_multipath_rebalance(struct fib6_info *rt)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003789{
David Ahern8d1c8022018-04-17 17:33:26 -07003790 struct fib6_info *first;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003791 int total;
3792
 3793	/* If the entire multipath route was marked for flushing,
 3794	 * there is no need to rebalance upon the removal of every
 3795	 * sibling route.
3796 */
David Ahern93c2fb22018-04-18 15:38:59 -07003797 if (!rt->fib6_nsiblings || rt->should_flush)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003798 return;
3799
3800 /* During lookup routes are evaluated in order, so we need to
3801 * make sure upper bounds are assigned from the first sibling
3802 * onwards.
3803 */
3804 first = rt6_multipath_first_sibling(rt);
3805 if (WARN_ON_ONCE(!first))
3806 return;
3807
3808 total = rt6_multipath_total_weight(first);
3809 rt6_multipath_upper_bound_set(first, total);
3810}
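/* The upper bounds computed above split the 31-bit hash space in
 * proportion to the live nexthop weights; the multipath lookup compares a
 * per-flow hash against them to pick a nexthop.  Worked example (assumed
 * weights) for three live siblings weighted 1, 2 and 3 (total 6):
 *
 *	nh0: DIV_ROUND_CLOSEST_ULL((u64)1 << 31, 6) - 1 =  357913940
 *	nh1: DIV_ROUND_CLOSEST_ULL((u64)3 << 31, 6) - 1 = 1073741823
 *	nh2: DIV_ROUND_CLOSEST_ULL((u64)6 << 31, 6) - 1 = 2147483647
 *
 * Dead nexthops keep an upper bound of -1 and are never selected.
 */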
3811
David Ahern8d1c8022018-04-17 17:33:26 -07003812static int fib6_ifup(struct fib6_info *rt, void *p_arg)
Ido Schimmel2127d952018-01-07 12:45:03 +02003813{
3814 const struct arg_netdev_event *arg = p_arg;
David Ahern7aef6852018-04-17 17:33:10 -07003815 struct net *net = dev_net(arg->dev);
Ido Schimmel2127d952018-01-07 12:45:03 +02003816
David Ahern421842e2018-04-17 17:33:18 -07003817 if (rt != net->ipv6.fib6_null_entry && rt->fib6_nh.nh_dev == arg->dev) {
David Ahern5e670d82018-04-17 17:33:14 -07003818 rt->fib6_nh.nh_flags &= ~arg->nh_flags;
David Ahern7aef6852018-04-17 17:33:10 -07003819 fib6_update_sernum_upto_root(net, rt);
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003820 rt6_multipath_rebalance(rt);
Ido Schimmel1de178e2018-01-07 12:45:15 +02003821 }
Ido Schimmel2127d952018-01-07 12:45:03 +02003822
3823 return 0;
3824}
3825
3826void rt6_sync_up(struct net_device *dev, unsigned int nh_flags)
3827{
3828 struct arg_netdev_event arg = {
3829 .dev = dev,
Ido Schimmel6802f3a2018-01-12 22:07:36 +02003830 {
3831 .nh_flags = nh_flags,
3832 },
Ido Schimmel2127d952018-01-07 12:45:03 +02003833 };
3834
3835 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
3836 arg.nh_flags |= RTNH_F_LINKDOWN;
3837
3838 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
3839}
3840
David Ahern8d1c8022018-04-17 17:33:26 -07003841static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
Ido Schimmel1de178e2018-01-07 12:45:15 +02003842 const struct net_device *dev)
3843{
David Ahern8d1c8022018-04-17 17:33:26 -07003844 struct fib6_info *iter;
Ido Schimmel1de178e2018-01-07 12:45:15 +02003845
David Ahern5e670d82018-04-17 17:33:14 -07003846 if (rt->fib6_nh.nh_dev == dev)
Ido Schimmel1de178e2018-01-07 12:45:15 +02003847 return true;
David Ahern93c2fb22018-04-18 15:38:59 -07003848 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
David Ahern5e670d82018-04-17 17:33:14 -07003849 if (iter->fib6_nh.nh_dev == dev)
Ido Schimmel1de178e2018-01-07 12:45:15 +02003850 return true;
3851
3852 return false;
3853}
3854
David Ahern8d1c8022018-04-17 17:33:26 -07003855static void rt6_multipath_flush(struct fib6_info *rt)
Ido Schimmel1de178e2018-01-07 12:45:15 +02003856{
David Ahern8d1c8022018-04-17 17:33:26 -07003857 struct fib6_info *iter;
Ido Schimmel1de178e2018-01-07 12:45:15 +02003858
3859 rt->should_flush = 1;
David Ahern93c2fb22018-04-18 15:38:59 -07003860 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
Ido Schimmel1de178e2018-01-07 12:45:15 +02003861 iter->should_flush = 1;
3862}
3863
David Ahern8d1c8022018-04-17 17:33:26 -07003864static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
Ido Schimmel1de178e2018-01-07 12:45:15 +02003865 const struct net_device *down_dev)
3866{
David Ahern8d1c8022018-04-17 17:33:26 -07003867 struct fib6_info *iter;
Ido Schimmel1de178e2018-01-07 12:45:15 +02003868 unsigned int dead = 0;
3869
David Ahern5e670d82018-04-17 17:33:14 -07003870 if (rt->fib6_nh.nh_dev == down_dev ||
3871 rt->fib6_nh.nh_flags & RTNH_F_DEAD)
Ido Schimmel1de178e2018-01-07 12:45:15 +02003872 dead++;
David Ahern93c2fb22018-04-18 15:38:59 -07003873 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
David Ahern5e670d82018-04-17 17:33:14 -07003874 if (iter->fib6_nh.nh_dev == down_dev ||
3875 iter->fib6_nh.nh_flags & RTNH_F_DEAD)
Ido Schimmel1de178e2018-01-07 12:45:15 +02003876 dead++;
3877
3878 return dead;
3879}
3880
David Ahern8d1c8022018-04-17 17:33:26 -07003881static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
Ido Schimmel1de178e2018-01-07 12:45:15 +02003882 const struct net_device *dev,
3883 unsigned int nh_flags)
3884{
David Ahern8d1c8022018-04-17 17:33:26 -07003885 struct fib6_info *iter;
Ido Schimmel1de178e2018-01-07 12:45:15 +02003886
David Ahern5e670d82018-04-17 17:33:14 -07003887 if (rt->fib6_nh.nh_dev == dev)
3888 rt->fib6_nh.nh_flags |= nh_flags;
David Ahern93c2fb22018-04-18 15:38:59 -07003889 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
David Ahern5e670d82018-04-17 17:33:14 -07003890 if (iter->fib6_nh.nh_dev == dev)
3891 iter->fib6_nh.nh_flags |= nh_flags;
Ido Schimmel1de178e2018-01-07 12:45:15 +02003892}
3893
David Aherna1a22c12017-01-18 07:40:36 -08003894/* called with write lock held for table with rt */
David Ahern8d1c8022018-04-17 17:33:26 -07003895static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896{
Ido Schimmel4c981e22018-01-07 12:45:04 +02003897 const struct arg_netdev_event *arg = p_arg;
3898 const struct net_device *dev = arg->dev;
David Ahern7aef6852018-04-17 17:33:10 -07003899 struct net *net = dev_net(dev);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08003900
David Ahern421842e2018-04-17 17:33:18 -07003901 if (rt == net->ipv6.fib6_null_entry)
Ido Schimmel27c6fa72018-01-07 12:45:05 +02003902 return 0;
3903
3904 switch (arg->event) {
3905 case NETDEV_UNREGISTER:
David Ahern5e670d82018-04-17 17:33:14 -07003906 return rt->fib6_nh.nh_dev == dev ? -1 : 0;
Ido Schimmel27c6fa72018-01-07 12:45:05 +02003907 case NETDEV_DOWN:
Ido Schimmel1de178e2018-01-07 12:45:15 +02003908 if (rt->should_flush)
Ido Schimmel27c6fa72018-01-07 12:45:05 +02003909 return -1;
David Ahern93c2fb22018-04-18 15:38:59 -07003910 if (!rt->fib6_nsiblings)
David Ahern5e670d82018-04-17 17:33:14 -07003911 return rt->fib6_nh.nh_dev == dev ? -1 : 0;
Ido Schimmel1de178e2018-01-07 12:45:15 +02003912 if (rt6_multipath_uses_dev(rt, dev)) {
3913 unsigned int count;
3914
3915 count = rt6_multipath_dead_count(rt, dev);
David Ahern93c2fb22018-04-18 15:38:59 -07003916 if (rt->fib6_nsiblings + 1 == count) {
Ido Schimmel1de178e2018-01-07 12:45:15 +02003917 rt6_multipath_flush(rt);
3918 return -1;
3919 }
3920 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
3921 RTNH_F_LINKDOWN);
David Ahern7aef6852018-04-17 17:33:10 -07003922 fib6_update_sernum(net, rt);
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003923 rt6_multipath_rebalance(rt);
Ido Schimmel1de178e2018-01-07 12:45:15 +02003924 }
3925 return -2;
Ido Schimmel27c6fa72018-01-07 12:45:05 +02003926 case NETDEV_CHANGE:
David Ahern5e670d82018-04-17 17:33:14 -07003927 if (rt->fib6_nh.nh_dev != dev ||
David Ahern93c2fb22018-04-18 15:38:59 -07003928 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel27c6fa72018-01-07 12:45:05 +02003929 break;
David Ahern5e670d82018-04-17 17:33:14 -07003930 rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003931 rt6_multipath_rebalance(rt);
Ido Schimmel27c6fa72018-01-07 12:45:05 +02003932 break;
Ido Schimmel2b241362018-01-07 12:45:02 +02003933 }
David S. Millerc159d302011-12-26 15:24:36 -05003934
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 return 0;
3936}
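/* Summary of the event handling above:
 *  - NETDEV_UNREGISTER: returning -1 asks the fib6 walker to delete every
 *    route whose nexthop device is the one going away.
 *  - NETDEV_DOWN: single-nexthop routes on the device are deleted; a
 *    multipath route is flushed only once all of its nexthops are dead,
 *    otherwise the affected nexthops are marked RTNH_F_DEAD |
 *    RTNH_F_LINKDOWN and the hash bounds are rebalanced.
 *  - NETDEV_CHANGE: unless the route is local/anycast, the nexthop is
 *    marked RTNH_F_LINKDOWN and the hash bounds are rebalanced.
 */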
3937
Ido Schimmel27c6fa72018-01-07 12:45:05 +02003938void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939{
Ido Schimmel4c981e22018-01-07 12:45:04 +02003940 struct arg_netdev_event arg = {
Daniel Lezcano8ed67782008-03-04 13:48:30 -08003941 .dev = dev,
Ido Schimmel6802f3a2018-01-12 22:07:36 +02003942 {
3943 .event = event,
3944 },
Daniel Lezcano8ed67782008-03-04 13:48:30 -08003945 };
3946
Ido Schimmel4c981e22018-01-07 12:45:04 +02003947 fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
3948}
3949
3950void rt6_disable_ip(struct net_device *dev, unsigned long event)
3951{
3952 rt6_sync_down_dev(dev, event);
3953 rt6_uncached_list_flush_dev(dev_net(dev), dev);
3954 neigh_ifdown(&nd_tbl, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955}
3956
Eric Dumazet95c96172012-04-15 05:58:06 +00003957struct rt6_mtu_change_arg {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958 struct net_device *dev;
Eric Dumazet95c96172012-04-15 05:58:06 +00003959 unsigned int mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960};
3961
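/* rt6_mtu_change() below walks the whole FIB when a device MTU changes.
 * For each unlocked-RTAX_MTU route using that device, the stored MTU is
 * updated when it shrinks below the route's current value, or when it
 * grows and the route was merely tracking the old device MTU
 * (mtu == idev->cnf.mtu6); cached exception routes are updated under
 * rt6_exception_lock as well.
 */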
David Ahern8d1c8022018-04-17 17:33:26 -07003962static int rt6_mtu_change_route(struct fib6_info *rt, void *p_arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963{
3964 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
3965 struct inet6_dev *idev;
3966
 3967	/* In IPv6, PMTU discovery is not optional,
 3968	   so the RTAX_MTU lock cannot disable it.
3969 We still use this lock to block changes
3970 caused by addrconf/ndisc.
3971 */
3972
3973 idev = __in6_dev_get(arg->dev);
David S. Miller38308472011-12-03 18:02:47 -05003974 if (!idev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975 return 0;
3976
 3977	/* When the MTU is raised administratively, IPv6 PMTU discovery
 3978	   has no way to learn about the increase, so the PMTU must be
 3979	   updated here. Since RFC 1981 does not cover administrative MTU
 3980	   increases, updating the PMTU on increase is a MUST (e.g. jumbo frames).
3981 */
David Ahern5e670d82018-04-17 17:33:14 -07003982 if (rt->fib6_nh.nh_dev == arg->dev &&
David Ahernd4ead6b2018-04-17 17:33:16 -07003983 !fib6_metric_locked(rt, RTAX_MTU)) {
3984 u32 mtu = rt->fib6_pmtu;
3985
3986 if (mtu >= arg->mtu ||
3987 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
3988 fib6_metric_set(rt, RTAX_MTU, arg->mtu);
3989
Wei Wangf5bbe7e2017-10-06 12:05:59 -07003990 spin_lock_bh(&rt6_exception_lock);
Stefano Brivioe9fa1492018-03-06 11:10:19 +01003991 rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
Wei Wangf5bbe7e2017-10-06 12:05:59 -07003992 spin_unlock_bh(&rt6_exception_lock);
Simon Arlott566cfd82007-07-26 00:09:55 -07003993 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994 return 0;
3995}
3996
Eric Dumazet95c96172012-04-15 05:58:06 +00003997void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998{
Thomas Grafc71099a2006-08-04 23:20:06 -07003999 struct rt6_mtu_change_arg arg = {
4000 .dev = dev,
4001 .mtu = mtu,
4002 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07004003
Li RongQing0c3584d2013-12-27 16:32:38 +08004004 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005}
4006
Patrick McHardyef7c79e2007-06-05 12:38:30 -07004007static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
Thomas Graf5176f912006-08-26 20:13:18 -07004008 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
Thomas Graf86872cb2006-08-22 00:01:08 -07004009 [RTA_OIF] = { .type = NLA_U32 },
Thomas Grafab364a62006-08-22 00:01:47 -07004010 [RTA_IIF] = { .type = NLA_U32 },
Thomas Graf86872cb2006-08-22 00:01:08 -07004011 [RTA_PRIORITY] = { .type = NLA_U32 },
4012 [RTA_METRICS] = { .type = NLA_NESTED },
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004013 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
Lubomir Rintelc78ba6d2015-03-11 15:39:21 +01004014 [RTA_PREF] = { .type = NLA_U8 },
Roopa Prabhu19e42e42015-07-21 10:43:48 +02004015 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4016 [RTA_ENCAP] = { .type = NLA_NESTED },
Xin Long32bc2012015-12-16 17:50:11 +08004017 [RTA_EXPIRES] = { .type = NLA_U32 },
Lorenzo Colitti622ec2c2016-11-04 02:23:42 +09004018 [RTA_UID] = { .type = NLA_U32 },
Liping Zhang3b45a412017-02-27 20:59:39 +08004019 [RTA_MARK] = { .type = NLA_U32 },
Thomas Graf86872cb2006-08-22 00:01:08 -07004020};
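/* The policy above validates the attributes that rtm_to_fib6_config()
 * accepts for RTM_NEWROUTE/RTM_DELROUTE.  As a rough, illustrative
 * mapping of iproute2 syntax onto those attributes:
 *
 *	ip -6 route add 2001:db8::/64 via fe80::1 dev eth0 metric 1024
 *
 * would be expected to carry RTA_DST (with rtm_dst_len = 64 in the
 * header), RTA_GATEWAY, RTA_OIF and RTA_PRIORITY; "table", "expires" and
 * "pref" are expected to map to RTA_TABLE, RTA_EXPIRES and RTA_PREF.
 */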
4021
4022static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
David Ahern333c4302017-05-21 10:12:04 -06004023 struct fib6_config *cfg,
4024 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025{
Thomas Graf86872cb2006-08-22 00:01:08 -07004026 struct rtmsg *rtm;
4027 struct nlattr *tb[RTA_MAX+1];
Lubomir Rintelc78ba6d2015-03-11 15:39:21 +01004028 unsigned int pref;
Thomas Graf86872cb2006-08-22 00:01:08 -07004029 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004030
Johannes Bergfceb6432017-04-12 14:34:07 +02004031 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
4032 NULL);
Thomas Graf86872cb2006-08-22 00:01:08 -07004033 if (err < 0)
4034 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035
Thomas Graf86872cb2006-08-22 00:01:08 -07004036 err = -EINVAL;
4037 rtm = nlmsg_data(nlh);
4038 memset(cfg, 0, sizeof(*cfg));
4039
4040 cfg->fc_table = rtm->rtm_table;
4041 cfg->fc_dst_len = rtm->rtm_dst_len;
4042 cfg->fc_src_len = rtm->rtm_src_len;
4043 cfg->fc_flags = RTF_UP;
4044 cfg->fc_protocol = rtm->rtm_protocol;
Nicolas Dichtelef2c7d72012-09-05 02:12:42 +00004045 cfg->fc_type = rtm->rtm_type;
Thomas Graf86872cb2006-08-22 00:01:08 -07004046
Nicolas Dichtelef2c7d72012-09-05 02:12:42 +00004047 if (rtm->rtm_type == RTN_UNREACHABLE ||
4048 rtm->rtm_type == RTN_BLACKHOLE ||
Nicolas Dichtelb4949ab2012-09-06 05:53:35 +00004049 rtm->rtm_type == RTN_PROHIBIT ||
4050 rtm->rtm_type == RTN_THROW)
Thomas Graf86872cb2006-08-22 00:01:08 -07004051 cfg->fc_flags |= RTF_REJECT;
4052
Maciej Żenczykowskiab79ad12010-09-27 00:07:02 +00004053 if (rtm->rtm_type == RTN_LOCAL)
4054 cfg->fc_flags |= RTF_LOCAL;
4055
Martin KaFai Lau1f56a012015-04-28 13:03:03 -07004056 if (rtm->rtm_flags & RTM_F_CLONED)
4057 cfg->fc_flags |= RTF_CACHE;
4058
David Ahernfc1e64e2018-01-25 16:55:09 -08004059 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4060
Eric W. Biederman15e47302012-09-07 20:12:54 +00004061 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
Thomas Graf86872cb2006-08-22 00:01:08 -07004062 cfg->fc_nlinfo.nlh = nlh;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09004063 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
Thomas Graf86872cb2006-08-22 00:01:08 -07004064
4065 if (tb[RTA_GATEWAY]) {
Jiri Benc67b61f62015-03-29 16:59:26 +02004066 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
Thomas Graf86872cb2006-08-22 00:01:08 -07004067 cfg->fc_flags |= RTF_GATEWAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068 }
Thomas Graf86872cb2006-08-22 00:01:08 -07004069
4070 if (tb[RTA_DST]) {
4071 int plen = (rtm->rtm_dst_len + 7) >> 3;
4072
4073 if (nla_len(tb[RTA_DST]) < plen)
4074 goto errout;
4075
4076 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077 }
Thomas Graf86872cb2006-08-22 00:01:08 -07004078
4079 if (tb[RTA_SRC]) {
4080 int plen = (rtm->rtm_src_len + 7) >> 3;
4081
4082 if (nla_len(tb[RTA_SRC]) < plen)
4083 goto errout;
4084
4085 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004086 }
Thomas Graf86872cb2006-08-22 00:01:08 -07004087
Daniel Walterc3968a82011-04-13 21:10:57 +00004088 if (tb[RTA_PREFSRC])
Jiri Benc67b61f62015-03-29 16:59:26 +02004089 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
Daniel Walterc3968a82011-04-13 21:10:57 +00004090
Thomas Graf86872cb2006-08-22 00:01:08 -07004091 if (tb[RTA_OIF])
4092 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
4093
4094 if (tb[RTA_PRIORITY])
4095 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
4096
4097 if (tb[RTA_METRICS]) {
4098 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
4099 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100 }
Thomas Graf86872cb2006-08-22 00:01:08 -07004101
4102 if (tb[RTA_TABLE])
4103 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
4104
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004105 if (tb[RTA_MULTIPATH]) {
4106 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
4107 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
David Ahern9ed59592017-01-17 14:57:36 -08004108
4109 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
David Ahernc255bd62017-05-27 16:19:27 -06004110 cfg->fc_mp_len, extack);
David Ahern9ed59592017-01-17 14:57:36 -08004111 if (err < 0)
4112 goto errout;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004113 }
4114
Lubomir Rintelc78ba6d2015-03-11 15:39:21 +01004115 if (tb[RTA_PREF]) {
4116 pref = nla_get_u8(tb[RTA_PREF]);
4117 if (pref != ICMPV6_ROUTER_PREF_LOW &&
4118 pref != ICMPV6_ROUTER_PREF_HIGH)
4119 pref = ICMPV6_ROUTER_PREF_MEDIUM;
4120 cfg->fc_flags |= RTF_PREF(pref);
4121 }
4122
Roopa Prabhu19e42e42015-07-21 10:43:48 +02004123 if (tb[RTA_ENCAP])
4124 cfg->fc_encap = tb[RTA_ENCAP];
4125
David Ahern9ed59592017-01-17 14:57:36 -08004126 if (tb[RTA_ENCAP_TYPE]) {
Roopa Prabhu19e42e42015-07-21 10:43:48 +02004127 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
4128
David Ahernc255bd62017-05-27 16:19:27 -06004129 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
David Ahern9ed59592017-01-17 14:57:36 -08004130 if (err < 0)
4131 goto errout;
4132 }
4133
Xin Long32bc2012015-12-16 17:50:11 +08004134 if (tb[RTA_EXPIRES]) {
4135 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
4136
4137 if (addrconf_finite_timeout(timeout)) {
4138 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
4139 cfg->fc_flags |= RTF_EXPIRES;
4140 }
4141 }
4142
Thomas Graf86872cb2006-08-22 00:01:08 -07004143 err = 0;
4144errout:
4145 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146}
4147
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004148struct rt6_nh {
David Ahern8d1c8022018-04-17 17:33:26 -07004149 struct fib6_info *fib6_info;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004150 struct fib6_config r_cfg;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004151 struct list_head next;
4152};
4153
4154static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
4155{
4156 struct rt6_nh *nh;
4157
4158 list_for_each_entry(nh, rt6_nh_list, next) {
David Ahern7d4d5062017-02-02 12:37:12 -08004159 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004160 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
4161 nh->r_cfg.fc_ifindex);
4162 }
4163}
4164
David Ahernd4ead6b2018-04-17 17:33:16 -07004165static int ip6_route_info_append(struct net *net,
4166 struct list_head *rt6_nh_list,
David Ahern8d1c8022018-04-17 17:33:26 -07004167 struct fib6_info *rt,
4168 struct fib6_config *r_cfg)
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004169{
4170 struct rt6_nh *nh;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004171 int err = -EEXIST;
4172
4173 list_for_each_entry(nh, rt6_nh_list, next) {
David Ahern8d1c8022018-04-17 17:33:26 -07004174 /* check if fib6_info already exists */
4175 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004176 return err;
4177 }
4178
4179 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
4180 if (!nh)
4181 return -ENOMEM;
David Ahern8d1c8022018-04-17 17:33:26 -07004182 nh->fib6_info = rt;
David Ahernd4ead6b2018-04-17 17:33:16 -07004183 err = ip6_convert_metrics(net, rt, r_cfg);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004184 if (err) {
4185 kfree(nh);
4186 return err;
4187 }
4188 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
4189 list_add_tail(&nh->next, rt6_nh_list);
4190
4191 return 0;
4192}
4193
David Ahern8d1c8022018-04-17 17:33:26 -07004194static void ip6_route_mpath_notify(struct fib6_info *rt,
4195 struct fib6_info *rt_last,
David Ahern3b1137f2017-02-02 12:37:10 -08004196 struct nl_info *info,
4197 __u16 nlflags)
4198{
4199 /* if this is an APPEND route, then rt points to the first route
4200 * inserted and rt_last points to last route inserted. Userspace
4201 * wants a consistent dump of the route which starts at the first
4202 * nexthop. Since sibling routes are always added at the end of
4203 * the list, find the first sibling of the last route appended
4204 */
David Ahern93c2fb22018-04-18 15:38:59 -07004205 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
4206 rt = list_first_entry(&rt_last->fib6_siblings,
David Ahern8d1c8022018-04-17 17:33:26 -07004207 struct fib6_info,
David Ahern93c2fb22018-04-18 15:38:59 -07004208 fib6_siblings);
David Ahern3b1137f2017-02-02 12:37:10 -08004209 }
4210
4211 if (rt)
4212 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
4213}
4214
David Ahern333c4302017-05-21 10:12:04 -06004215static int ip6_route_multipath_add(struct fib6_config *cfg,
4216 struct netlink_ext_ack *extack)
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004217{
David Ahern8d1c8022018-04-17 17:33:26 -07004218 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
David Ahern3b1137f2017-02-02 12:37:10 -08004219 struct nl_info *info = &cfg->fc_nlinfo;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004220 struct fib6_config r_cfg;
4221 struct rtnexthop *rtnh;
David Ahern8d1c8022018-04-17 17:33:26 -07004222 struct fib6_info *rt;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004223 struct rt6_nh *err_nh;
4224 struct rt6_nh *nh, *nh_safe;
David Ahern3b1137f2017-02-02 12:37:10 -08004225 __u16 nlflags;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004226 int remaining;
4227 int attrlen;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004228 int err = 1;
4229 int nhn = 0;
4230 int replace = (cfg->fc_nlinfo.nlh &&
4231 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
4232 LIST_HEAD(rt6_nh_list);
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004233
David Ahern3b1137f2017-02-02 12:37:10 -08004234 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
4235 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
4236 nlflags |= NLM_F_APPEND;
4237
Michal Kubeček35f1b4e2015-05-18 20:53:55 +02004238 remaining = cfg->fc_mp_len;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004239 rtnh = (struct rtnexthop *)cfg->fc_mp;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004240
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004241 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
David Ahern8d1c8022018-04-17 17:33:26 -07004242 * fib6_info structs per nexthop
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004243 */
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004244 while (rtnh_ok(rtnh, remaining)) {
4245 memcpy(&r_cfg, cfg, sizeof(*cfg));
4246 if (rtnh->rtnh_ifindex)
4247 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4248
4249 attrlen = rtnh_attrlen(rtnh);
4250 if (attrlen > 0) {
4251 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4252
4253 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4254 if (nla) {
Jiri Benc67b61f62015-03-29 16:59:26 +02004255 r_cfg.fc_gateway = nla_get_in6_addr(nla);
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004256 r_cfg.fc_flags |= RTF_GATEWAY;
4257 }
Roopa Prabhu19e42e42015-07-21 10:43:48 +02004258 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
4259 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
4260 if (nla)
4261 r_cfg.fc_encap_type = nla_get_u16(nla);
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004262 }
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004263
David Ahern68e2ffd2018-03-20 10:06:59 -07004264 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
David Ahernacb54e32018-04-17 17:33:22 -07004265 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07004266 if (IS_ERR(rt)) {
4267 err = PTR_ERR(rt);
4268 rt = NULL;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004269 goto cleanup;
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07004270 }
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004271
David Ahern5e670d82018-04-17 17:33:14 -07004272 rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1;
Ido Schimmel398958a2018-01-09 16:40:28 +02004273
David Ahernd4ead6b2018-04-17 17:33:16 -07004274 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
4275 rt, &r_cfg);
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004276 if (err) {
David Ahern93531c62018-04-17 17:33:25 -07004277 fib6_info_release(rt);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004278 goto cleanup;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004279 }
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004280
4281 rtnh = rtnh_next(rtnh, &remaining);
4282 }
4283
David Ahern3b1137f2017-02-02 12:37:10 -08004284 /* for add and replace send one notification with all nexthops.
4285 * Skip the notification in fib6_add_rt2node and send one with
4286 * the full route when done
4287 */
4288 info->skip_notify = 1;
4289
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004290 err_nh = NULL;
4291 list_for_each_entry(nh, &rt6_nh_list, next) {
David Ahern8d1c8022018-04-17 17:33:26 -07004292 rt_last = nh->fib6_info;
4293 err = __ip6_ins_rt(nh->fib6_info, info, extack);
4294 fib6_info_release(nh->fib6_info);
David Ahern93531c62018-04-17 17:33:25 -07004295
David Ahern3b1137f2017-02-02 12:37:10 -08004296 /* save reference to first route for notification */
4297 if (!rt_notif && !err)
David Ahern8d1c8022018-04-17 17:33:26 -07004298 rt_notif = nh->fib6_info;
David Ahern3b1137f2017-02-02 12:37:10 -08004299
David Ahern8d1c8022018-04-17 17:33:26 -07004300 /* nh->fib6_info is used or freed at this point, reset to NULL*/
4301 nh->fib6_info = NULL;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004302 if (err) {
4303 if (replace && nhn)
4304 ip6_print_replace_route_err(&rt6_nh_list);
4305 err_nh = nh;
4306 goto add_errout;
4307 }
4308
Nicolas Dichtel1a724182012-11-01 22:58:22 +00004309		/* Because each nexthop is added as a separate route, we clear
Michal Kubeček27596472015-05-18 20:54:00 +02004310		 * these flags after the first nexthop: if there is a collision,
 4311		 * we have already failed to add the first nexthop
 4312		 * (fib6_add_rt2node() has rejected it); when replacing, the old
 4313		 * nexthops have already been replaced by the first new one, and
 4314		 * the remaining ones should simply be appended to it.
Nicolas Dichtel1a724182012-11-01 22:58:22 +00004315 */
Michal Kubeček27596472015-05-18 20:54:00 +02004316 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
4317 NLM_F_REPLACE);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004318 nhn++;
4319 }
4320
David Ahern3b1137f2017-02-02 12:37:10 -08004321 /* success ... tell user about new route */
4322 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004323 goto cleanup;
4324
4325add_errout:
David Ahern3b1137f2017-02-02 12:37:10 -08004326 /* send notification for routes that were added so that
4327 * the delete notifications sent by ip6_route_del are
4328 * coherent
4329 */
4330 if (rt_notif)
4331 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
4332
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004333 /* Delete routes that were already added */
4334 list_for_each_entry(nh, &rt6_nh_list, next) {
4335 if (err_nh == nh)
4336 break;
David Ahern333c4302017-05-21 10:12:04 -06004337 ip6_route_del(&nh->r_cfg, extack);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004338 }
4339
4340cleanup:
4341 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
David Ahern8d1c8022018-04-17 17:33:26 -07004342 if (nh->fib6_info)
4343 fib6_info_release(nh->fib6_info);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004344 list_del(&nh->next);
4345 kfree(nh);
4346 }
4347
4348 return err;
4349}
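/* A multipath route arrives as a single RTM_NEWROUTE message whose
 * RTA_MULTIPATH attribute carries one rtnexthop per path, e.g. the
 * illustrative iproute2 invocation:
 *
 *	ip -6 route add 2001:db8:1::/64 \
 *		nexthop via fe80::1 dev eth0 weight 1 \
 *		nexthop via fe80::2 dev eth1 weight 2
 *
 * Each nexthop is turned into its own fib6_info (nh_weight is
 * rtnh_hops + 1) and inserted separately; fib6_add_rt2node() links the
 * entries as siblings, and one notification covering the whole route is
 * sent once every nexthop has been processed.
 */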
4350
David Ahern333c4302017-05-21 10:12:04 -06004351static int ip6_route_multipath_del(struct fib6_config *cfg,
4352 struct netlink_ext_ack *extack)
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004353{
4354 struct fib6_config r_cfg;
4355 struct rtnexthop *rtnh;
4356 int remaining;
4357 int attrlen;
4358 int err = 1, last_err = 0;
4359
4360 remaining = cfg->fc_mp_len;
4361 rtnh = (struct rtnexthop *)cfg->fc_mp;
4362
4363 /* Parse a Multipath Entry */
4364 while (rtnh_ok(rtnh, remaining)) {
4365 memcpy(&r_cfg, cfg, sizeof(*cfg));
4366 if (rtnh->rtnh_ifindex)
4367 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4368
4369 attrlen = rtnh_attrlen(rtnh);
4370 if (attrlen > 0) {
4371 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4372
4373 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4374 if (nla) {
4375 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
4376 r_cfg.fc_flags |= RTF_GATEWAY;
4377 }
4378 }
David Ahern333c4302017-05-21 10:12:04 -06004379 err = ip6_route_del(&r_cfg, extack);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004380 if (err)
4381 last_err = err;
4382
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004383 rtnh = rtnh_next(rtnh, &remaining);
4384 }
4385
4386 return last_err;
4387}
4388
David Ahernc21ef3e2017-04-16 09:48:24 -07004389static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4390 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004391{
Thomas Graf86872cb2006-08-22 00:01:08 -07004392 struct fib6_config cfg;
4393 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394
David Ahern333c4302017-05-21 10:12:04 -06004395 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
Thomas Graf86872cb2006-08-22 00:01:08 -07004396 if (err < 0)
4397 return err;
4398
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004399 if (cfg.fc_mp)
David Ahern333c4302017-05-21 10:12:04 -06004400 return ip6_route_multipath_del(&cfg, extack);
David Ahern0ae81332017-02-02 12:37:08 -08004401 else {
4402 cfg.fc_delete_all_nh = 1;
David Ahern333c4302017-05-21 10:12:04 -06004403 return ip6_route_del(&cfg, extack);
David Ahern0ae81332017-02-02 12:37:08 -08004404 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004405}
4406
David Ahernc21ef3e2017-04-16 09:48:24 -07004407static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4408 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004409{
Thomas Graf86872cb2006-08-22 00:01:08 -07004410 struct fib6_config cfg;
4411 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412
David Ahern333c4302017-05-21 10:12:04 -06004413 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
Thomas Graf86872cb2006-08-22 00:01:08 -07004414 if (err < 0)
4415 return err;
4416
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004417 if (cfg.fc_mp)
David Ahern333c4302017-05-21 10:12:04 -06004418 return ip6_route_multipath_add(&cfg, extack);
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004419 else
David Ahernacb54e32018-04-17 17:33:22 -07004420 return ip6_route_add(&cfg, GFP_KERNEL, extack);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421}
4422
David Ahern8d1c8022018-04-17 17:33:26 -07004423static size_t rt6_nlmsg_size(struct fib6_info *rt)
Thomas Graf339bf982006-11-10 14:10:15 -08004424{
David Ahernbeb1afac52017-02-02 12:37:09 -08004425 int nexthop_len = 0;
4426
David Ahern93c2fb22018-04-18 15:38:59 -07004427 if (rt->fib6_nsiblings) {
David Ahernbeb1afac52017-02-02 12:37:09 -08004428 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
4429 + NLA_ALIGN(sizeof(struct rtnexthop))
4430 + nla_total_size(16) /* RTA_GATEWAY */
David Ahern5e670d82018-04-17 17:33:14 -07004431 + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate);
David Ahernbeb1afac52017-02-02 12:37:09 -08004432
David Ahern93c2fb22018-04-18 15:38:59 -07004433 nexthop_len *= rt->fib6_nsiblings;
David Ahernbeb1afac52017-02-02 12:37:09 -08004434 }
4435
Thomas Graf339bf982006-11-10 14:10:15 -08004436 return NLMSG_ALIGN(sizeof(struct rtmsg))
4437 + nla_total_size(16) /* RTA_SRC */
4438 + nla_total_size(16) /* RTA_DST */
4439 + nla_total_size(16) /* RTA_GATEWAY */
4440 + nla_total_size(16) /* RTA_PREFSRC */
4441 + nla_total_size(4) /* RTA_TABLE */
4442 + nla_total_size(4) /* RTA_IIF */
4443 + nla_total_size(4) /* RTA_OIF */
4444 + nla_total_size(4) /* RTA_PRIORITY */
Noriaki TAKAMIYA6a2b9ce2007-01-23 22:09:41 -08004445 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
Daniel Borkmannea697632015-01-05 23:57:47 +01004446 + nla_total_size(sizeof(struct rta_cacheinfo))
Lubomir Rintelc78ba6d2015-03-11 15:39:21 +01004447 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
Roopa Prabhu19e42e42015-07-21 10:43:48 +02004448 + nla_total_size(1) /* RTA_PREF */
David Ahern5e670d82018-04-17 17:33:14 -07004449 + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate)
David Ahernbeb1afac52017-02-02 12:37:09 -08004450 + nexthop_len;
4451}
4452
David Ahern8d1c8022018-04-17 17:33:26 -07004453static int rt6_nexthop_info(struct sk_buff *skb, struct fib6_info *rt,
David Ahern5be083c2017-03-06 15:57:31 -08004454 unsigned int *flags, bool skip_oif)
David Ahernbeb1afac52017-02-02 12:37:09 -08004455{
David Ahern5e670d82018-04-17 17:33:14 -07004456 if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
Ido Schimmelf9d882e2018-01-07 12:45:10 +02004457 *flags |= RTNH_F_DEAD;
4458
David Ahern5e670d82018-04-17 17:33:14 -07004459 if (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN) {
David Ahernbeb1afac52017-02-02 12:37:09 -08004460 *flags |= RTNH_F_LINKDOWN;
David Aherndcd1f572018-04-18 15:39:05 -07004461
4462 rcu_read_lock();
4463 if (fib6_ignore_linkdown(rt))
David Ahernbeb1afac52017-02-02 12:37:09 -08004464 *flags |= RTNH_F_DEAD;
David Aherndcd1f572018-04-18 15:39:05 -07004465 rcu_read_unlock();
David Ahernbeb1afac52017-02-02 12:37:09 -08004466 }
4467
David Ahern93c2fb22018-04-18 15:38:59 -07004468 if (rt->fib6_flags & RTF_GATEWAY) {
David Ahern5e670d82018-04-17 17:33:14 -07004469 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->fib6_nh.nh_gw) < 0)
David Ahernbeb1afac52017-02-02 12:37:09 -08004470 goto nla_put_failure;
4471 }
4472
David Ahern5e670d82018-04-17 17:33:14 -07004473 *flags |= (rt->fib6_nh.nh_flags & RTNH_F_ONLINK);
4474 if (rt->fib6_nh.nh_flags & RTNH_F_OFFLOAD)
Ido Schimmel61e4d012017-08-03 13:28:20 +02004475 *flags |= RTNH_F_OFFLOAD;
4476
David Ahern5be083c2017-03-06 15:57:31 -08004477	/* not needed for multipath encoding because it has an rtnexthop struct */
David Ahern5e670d82018-04-17 17:33:14 -07004478 if (!skip_oif && rt->fib6_nh.nh_dev &&
4479 nla_put_u32(skb, RTA_OIF, rt->fib6_nh.nh_dev->ifindex))
David Ahernbeb1afac52017-02-02 12:37:09 -08004480 goto nla_put_failure;
4481
David Ahern5e670d82018-04-17 17:33:14 -07004482 if (rt->fib6_nh.nh_lwtstate &&
4483 lwtunnel_fill_encap(skb, rt->fib6_nh.nh_lwtstate) < 0)
David Ahernbeb1afac52017-02-02 12:37:09 -08004484 goto nla_put_failure;
4485
4486 return 0;
4487
4488nla_put_failure:
4489 return -EMSGSIZE;
4490}
4491
David Ahern5be083c2017-03-06 15:57:31 -08004492/* add multipath next hop */
David Ahern8d1c8022018-04-17 17:33:26 -07004493static int rt6_add_nexthop(struct sk_buff *skb, struct fib6_info *rt)
David Ahernbeb1afac52017-02-02 12:37:09 -08004494{
David Ahern5e670d82018-04-17 17:33:14 -07004495 const struct net_device *dev = rt->fib6_nh.nh_dev;
David Ahernbeb1afac52017-02-02 12:37:09 -08004496 struct rtnexthop *rtnh;
4497 unsigned int flags = 0;
4498
4499 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
4500 if (!rtnh)
4501 goto nla_put_failure;
4502
David Ahern5e670d82018-04-17 17:33:14 -07004503 rtnh->rtnh_hops = rt->fib6_nh.nh_weight - 1;
4504 rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;
David Ahernbeb1afac52017-02-02 12:37:09 -08004505
David Ahern5be083c2017-03-06 15:57:31 -08004506 if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
David Ahernbeb1afac52017-02-02 12:37:09 -08004507 goto nla_put_failure;
4508
4509 rtnh->rtnh_flags = flags;
4510
4511 /* length of rtnetlink header + attributes */
4512 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
4513
4514 return 0;
4515
4516nla_put_failure:
4517 return -EMSGSIZE;
Thomas Graf339bf982006-11-10 14:10:15 -08004518}
4519
David Ahernd4ead6b2018-04-17 17:33:16 -07004520static int rt6_fill_node(struct net *net, struct sk_buff *skb,
David Ahern8d1c8022018-04-17 17:33:26 -07004521 struct fib6_info *rt, struct dst_entry *dst,
David Ahernd4ead6b2018-04-17 17:33:16 -07004522 struct in6_addr *dest, struct in6_addr *src,
Eric W. Biederman15e47302012-09-07 20:12:54 +00004523 int iif, int type, u32 portid, u32 seq,
David Ahernf8cfe2c2017-01-17 15:51:08 -08004524 unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004525{
4526 struct rtmsg *rtm;
Thomas Graf2d7202b2006-08-22 00:01:27 -07004527 struct nlmsghdr *nlh;
David Ahernd4ead6b2018-04-17 17:33:16 -07004528 long expires = 0;
4529 u32 *pmetrics;
Patrick McHardy9e762a42006-08-10 23:09:48 -07004530 u32 table;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531
Eric W. Biederman15e47302012-09-07 20:12:54 +00004532 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
David S. Miller38308472011-12-03 18:02:47 -05004533 if (!nlh)
Patrick McHardy26932562007-01-31 23:16:40 -08004534 return -EMSGSIZE;
Thomas Graf2d7202b2006-08-22 00:01:27 -07004535
4536 rtm = nlmsg_data(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004537 rtm->rtm_family = AF_INET6;
David Ahern93c2fb22018-04-18 15:38:59 -07004538 rtm->rtm_dst_len = rt->fib6_dst.plen;
4539 rtm->rtm_src_len = rt->fib6_src.plen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004540 rtm->rtm_tos = 0;
David Ahern93c2fb22018-04-18 15:38:59 -07004541 if (rt->fib6_table)
4542 table = rt->fib6_table->tb6_id;
Thomas Grafc71099a2006-08-04 23:20:06 -07004543 else
Patrick McHardy9e762a42006-08-10 23:09:48 -07004544 table = RT6_TABLE_UNSPEC;
4545 rtm->rtm_table = table;
David S. Millerc78679e2012-04-01 20:27:33 -04004546 if (nla_put_u32(skb, RTA_TABLE, table))
4547 goto nla_put_failure;
David Aherne8478e82018-04-17 17:33:13 -07004548
4549 rtm->rtm_type = rt->fib6_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004550 rtm->rtm_flags = 0;
4551 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
David Ahern93c2fb22018-04-18 15:38:59 -07004552 rtm->rtm_protocol = rt->fib6_protocol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004553
David Ahern93c2fb22018-04-18 15:38:59 -07004554 if (rt->fib6_flags & RTF_CACHE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004555 rtm->rtm_flags |= RTM_F_CLONED;
4556
David Ahernd4ead6b2018-04-17 17:33:16 -07004557 if (dest) {
4558 if (nla_put_in6_addr(skb, RTA_DST, dest))
David S. Millerc78679e2012-04-01 20:27:33 -04004559 goto nla_put_failure;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09004560 rtm->rtm_dst_len = 128;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004561 } else if (rtm->rtm_dst_len)
David Ahern93c2fb22018-04-18 15:38:59 -07004562 if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr))
David S. Millerc78679e2012-04-01 20:27:33 -04004563 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564#ifdef CONFIG_IPV6_SUBTREES
4565 if (src) {
Jiri Benc930345e2015-03-29 16:59:25 +02004566 if (nla_put_in6_addr(skb, RTA_SRC, src))
David S. Millerc78679e2012-04-01 20:27:33 -04004567 goto nla_put_failure;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09004568 rtm->rtm_src_len = 128;
David S. Millerc78679e2012-04-01 20:27:33 -04004569 } else if (rtm->rtm_src_len &&
David Ahern93c2fb22018-04-18 15:38:59 -07004570 nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr))
David S. Millerc78679e2012-04-01 20:27:33 -04004571 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004572#endif
YOSHIFUJI Hideaki7bc570c2008-04-03 09:22:53 +09004573 if (iif) {
4574#ifdef CONFIG_IPV6_MROUTE
David Ahern93c2fb22018-04-18 15:38:59 -07004575 if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) {
David Ahernfd61c6b2017-01-17 15:51:07 -08004576 int err = ip6mr_get_route(net, skb, rtm, portid);
Nikolay Aleksandrov2cf75072016-09-25 23:08:31 +02004577
David Ahernfd61c6b2017-01-17 15:51:07 -08004578 if (err == 0)
4579 return 0;
4580 if (err < 0)
4581 goto nla_put_failure;
YOSHIFUJI Hideaki7bc570c2008-04-03 09:22:53 +09004582 } else
4583#endif
David S. Millerc78679e2012-04-01 20:27:33 -04004584 if (nla_put_u32(skb, RTA_IIF, iif))
4585 goto nla_put_failure;
David Ahernd4ead6b2018-04-17 17:33:16 -07004586 } else if (dest) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004587 struct in6_addr saddr_buf;
David Ahernd4ead6b2018-04-17 17:33:16 -07004588 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
Jiri Benc930345e2015-03-29 16:59:25 +02004589 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
David S. Millerc78679e2012-04-01 20:27:33 -04004590 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004591 }
Thomas Graf2d7202b2006-08-22 00:01:27 -07004592
David Ahern93c2fb22018-04-18 15:38:59 -07004593 if (rt->fib6_prefsrc.plen) {
Daniel Walterc3968a82011-04-13 21:10:57 +00004594 struct in6_addr saddr_buf;
David Ahern93c2fb22018-04-18 15:38:59 -07004595 saddr_buf = rt->fib6_prefsrc.addr;
Jiri Benc930345e2015-03-29 16:59:25 +02004596 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
David S. Millerc78679e2012-04-01 20:27:33 -04004597 goto nla_put_failure;
Daniel Walterc3968a82011-04-13 21:10:57 +00004598 }
4599
David Ahernd4ead6b2018-04-17 17:33:16 -07004600 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
4601 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
Thomas Graf2d7202b2006-08-22 00:01:27 -07004602 goto nla_put_failure;
4603
David Ahern93c2fb22018-04-18 15:38:59 -07004604 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
David S. Millerc78679e2012-04-01 20:27:33 -04004605 goto nla_put_failure;
Li Wei82539472012-07-29 16:01:30 +00004606
David Ahernbeb1afac52017-02-02 12:37:09 -08004607 /* For multipath routes, walk the siblings list and add
4608 * each as a nexthop within RTA_MULTIPATH.
4609 */
David Ahern93c2fb22018-04-18 15:38:59 -07004610 if (rt->fib6_nsiblings) {
David Ahern8d1c8022018-04-17 17:33:26 -07004611 struct fib6_info *sibling, *next_sibling;
David Ahernbeb1afac52017-02-02 12:37:09 -08004612 struct nlattr *mp;
4613
4614 mp = nla_nest_start(skb, RTA_MULTIPATH);
4615 if (!mp)
4616 goto nla_put_failure;
4617
4618 if (rt6_add_nexthop(skb, rt) < 0)
4619 goto nla_put_failure;
4620
4621 list_for_each_entry_safe(sibling, next_sibling,
David Ahern93c2fb22018-04-18 15:38:59 -07004622 &rt->fib6_siblings, fib6_siblings) {
David Ahernbeb1afac52017-02-02 12:37:09 -08004623 if (rt6_add_nexthop(skb, sibling) < 0)
4624 goto nla_put_failure;
4625 }
4626
4627 nla_nest_end(skb, mp);
4628 } else {
David Ahern5be083c2017-03-06 15:57:31 -08004629 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
David Ahernbeb1afac52017-02-02 12:37:09 -08004630 goto nla_put_failure;
4631 }
4632
David Ahern93c2fb22018-04-18 15:38:59 -07004633 if (rt->fib6_flags & RTF_EXPIRES) {
David Ahern14895682018-04-17 17:33:17 -07004634 expires = dst ? dst->expires : rt->expires;
4635 expires -= jiffies;
4636 }
YOSHIFUJI Hideaki69cdf8f2008-05-19 16:55:13 -07004637
David Ahernd4ead6b2018-04-17 17:33:16 -07004638 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
Thomas Grafe3703b32006-11-27 09:27:07 -08004639 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004640
David Ahern93c2fb22018-04-18 15:38:59 -07004641 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags)))
Lubomir Rintelc78ba6d2015-03-11 15:39:21 +01004642 goto nla_put_failure;
4643
Roopa Prabhu19e42e42015-07-21 10:43:48 +02004644
Johannes Berg053c0952015-01-16 22:09:00 +01004645 nlmsg_end(skb, nlh);
4646 return 0;
Thomas Graf2d7202b2006-08-22 00:01:27 -07004647
4648nla_put_failure:
Patrick McHardy26932562007-01-31 23:16:40 -08004649 nlmsg_cancel(skb, nlh);
4650 return -EMSGSIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651}
4652
David Ahern8d1c8022018-04-17 17:33:26 -07004653int rt6_dump_route(struct fib6_info *rt, void *p_arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654{
4655 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
David Ahern1f17e2f2017-01-26 13:54:08 -08004656 struct net *net = arg->net;
4657
David Ahern421842e2018-04-17 17:33:18 -07004658 if (rt == net->ipv6.fib6_null_entry)
David Ahern1f17e2f2017-01-26 13:54:08 -08004659 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660
Thomas Graf2d7202b2006-08-22 00:01:27 -07004661 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
4662 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
David Ahernf8cfe2c2017-01-17 15:51:08 -08004663
4664 /* user wants prefix routes only */
4665 if (rtm->rtm_flags & RTM_F_PREFIX &&
David Ahern93c2fb22018-04-18 15:38:59 -07004666 !(rt->fib6_flags & RTF_PREFIX_RT)) {
David Ahernf8cfe2c2017-01-17 15:51:08 -08004667 /* success since this is not a prefix route */
4668 return 1;
4669 }
4670 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671
David Ahernd4ead6b2018-04-17 17:33:16 -07004672 return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0,
4673 RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid,
4674 arg->cb->nlh->nlmsg_seq, NLM_F_MULTI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004675}
4676
David Ahernc21ef3e2017-04-16 09:48:24 -07004677static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4678 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09004680 struct net *net = sock_net(in_skb->sk);
Thomas Grafab364a62006-08-22 00:01:47 -07004681 struct nlattr *tb[RTA_MAX+1];
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004682 int err, iif = 0, oif = 0;
4683 struct dst_entry *dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004684 struct rt6_info *rt;
Thomas Grafab364a62006-08-22 00:01:47 -07004685 struct sk_buff *skb;
4686 struct rtmsg *rtm;
David S. Miller4c9483b2011-03-12 16:22:43 -05004687 struct flowi6 fl6;
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004688 bool fibmatch;
Thomas Grafab364a62006-08-22 00:01:47 -07004689
Johannes Bergfceb6432017-04-12 14:34:07 +02004690 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
David Ahernc21ef3e2017-04-16 09:48:24 -07004691 extack);
Thomas Grafab364a62006-08-22 00:01:47 -07004692 if (err < 0)
4693 goto errout;
4694
4695 err = -EINVAL;
David S. Miller4c9483b2011-03-12 16:22:43 -05004696 memset(&fl6, 0, sizeof(fl6));
Hannes Frederic Sowa38b70972016-06-11 20:08:19 +02004697 rtm = nlmsg_data(nlh);
4698 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004699 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
Thomas Grafab364a62006-08-22 00:01:47 -07004700
4701 if (tb[RTA_SRC]) {
4702 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
4703 goto errout;
4704
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00004705 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
Thomas Grafab364a62006-08-22 00:01:47 -07004706 }
4707
4708 if (tb[RTA_DST]) {
4709 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
4710 goto errout;
4711
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00004712 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
Thomas Grafab364a62006-08-22 00:01:47 -07004713 }
4714
4715 if (tb[RTA_IIF])
4716 iif = nla_get_u32(tb[RTA_IIF]);
4717
4718 if (tb[RTA_OIF])
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00004719 oif = nla_get_u32(tb[RTA_OIF]);
Thomas Grafab364a62006-08-22 00:01:47 -07004720
Lorenzo Colitti2e47b292014-05-15 16:38:41 -07004721 if (tb[RTA_MARK])
4722 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
4723
Lorenzo Colitti622ec2c2016-11-04 02:23:42 +09004724 if (tb[RTA_UID])
4725 fl6.flowi6_uid = make_kuid(current_user_ns(),
4726 nla_get_u32(tb[RTA_UID]));
4727 else
4728 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
4729
Thomas Grafab364a62006-08-22 00:01:47 -07004730 if (iif) {
4731 struct net_device *dev;
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00004732 int flags = 0;
4733
Florian Westphal121622d2017-08-15 16:34:42 +02004734 rcu_read_lock();
4735
4736 dev = dev_get_by_index_rcu(net, iif);
Thomas Grafab364a62006-08-22 00:01:47 -07004737 if (!dev) {
Florian Westphal121622d2017-08-15 16:34:42 +02004738 rcu_read_unlock();
Thomas Grafab364a62006-08-22 00:01:47 -07004739 err = -ENODEV;
4740 goto errout;
4741 }
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00004742
4743 fl6.flowi6_iif = iif;
4744
4745 if (!ipv6_addr_any(&fl6.saddr))
4746 flags |= RT6_LOOKUP_F_HAS_SADDR;
4747
David Ahernb75cc8f2018-03-02 08:32:17 -08004748 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
Florian Westphal121622d2017-08-15 16:34:42 +02004749
4750 rcu_read_unlock();
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00004751 } else {
4752 fl6.flowi6_oif = oif;
4753
Ido Schimmel58acfd72017-12-20 12:28:25 +02004754 dst = ip6_route_output(net, NULL, &fl6);
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004755 }
4756
4758 rt = container_of(dst, struct rt6_info, dst);
4759 if (rt->dst.error) {
4760 err = rt->dst.error;
4761 ip6_rt_put(rt);
4762 goto errout;
Thomas Grafab364a62006-08-22 00:01:47 -07004763 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004764
WANG Cong9d6acb32017-03-01 20:48:39 -08004765 if (rt == net->ipv6.ip6_null_entry) {
4766 err = rt->dst.error;
4767 ip6_rt_put(rt);
4768 goto errout;
4769 }
4770
Linus Torvalds1da177e2005-04-16 15:20:36 -07004771 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
David S. Miller38308472011-12-03 18:02:47 -05004772 if (!skb) {
Amerigo Wang94e187c2012-10-29 00:13:19 +00004773 ip6_rt_put(rt);
Thomas Grafab364a62006-08-22 00:01:47 -07004774 err = -ENOBUFS;
4775 goto errout;
4776 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004777
Changli Gaod8d1f302010-06-10 23:31:35 -07004778 skb_dst_set(skb, &rt->dst);
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004779 if (fibmatch)
David Ahern93531c62018-04-17 17:33:25 -07004780 err = rt6_fill_node(net, skb, rt->from, NULL, NULL, NULL, iif,
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004781 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
4782 nlh->nlmsg_seq, 0);
4783 else
David Ahern93531c62018-04-17 17:33:25 -07004784 err = rt6_fill_node(net, skb, rt->from, dst,
4785 &fl6.daddr, &fl6.saddr, iif, RTM_NEWROUTE,
David Ahernd4ead6b2018-04-17 17:33:16 -07004786 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
4787 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004788 if (err < 0) {
Thomas Grafab364a62006-08-22 00:01:47 -07004789 kfree_skb(skb);
4790 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004791 }
4792
Eric W. Biederman15e47302012-09-07 20:12:54 +00004793 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
Thomas Grafab364a62006-08-22 00:01:47 -07004794errout:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004796}
4797
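/* Broadcast an RTM_NEWROUTE/RTM_DELROUTE notification for @rt to the
 * RTNLGRP_IPV6_ROUTE group; allocation or fill failures are reported
 * via rtnl_set_sk_err() so listeners can detect the missed update.
 */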
David Ahern8d1c8022018-04-17 17:33:26 -07004798void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
Roopa Prabhu37a1d362015-09-13 10:18:33 -07004799 unsigned int nlm_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004800{
4801 struct sk_buff *skb;
Daniel Lezcano55786892008-03-04 13:47:47 -08004802 struct net *net = info->nl_net;
Denis V. Lunev528c4ce2007-12-13 09:45:12 -08004803 u32 seq;
4804 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004805
Denis V. Lunev528c4ce2007-12-13 09:45:12 -08004806 err = -ENOBUFS;
David S. Miller38308472011-12-03 18:02:47 -05004807 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
Thomas Graf86872cb2006-08-22 00:01:08 -07004808
Roopa Prabhu19e42e42015-07-21 10:43:48 +02004809 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
David S. Miller38308472011-12-03 18:02:47 -05004810 if (!skb)
Thomas Graf21713eb2006-08-15 00:35:24 -07004811 goto errout;
4812
David Ahernd4ead6b2018-04-17 17:33:16 -07004813 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
4814 event, info->portid, seq, nlm_flags);
Patrick McHardy26932562007-01-31 23:16:40 -08004815 if (err < 0) {
4816 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
4817 WARN_ON(err == -EMSGSIZE);
4818 kfree_skb(skb);
4819 goto errout;
4820 }
Eric W. Biederman15e47302012-09-07 20:12:54 +00004821 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08004822 info->nlh, gfp_any());
4823 return;
Thomas Graf21713eb2006-08-15 00:35:24 -07004824errout:
4825 if (err < 0)
Daniel Lezcano55786892008-03-04 13:47:47 -08004826 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004827}
4828
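/* Netdevice notifier: when the per-namespace loopback device is
 * registered, point the special null/prohibit/blackhole entries at it;
 * on its unregistration drop the idev references again.
 */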
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004829static int ip6_route_dev_notify(struct notifier_block *this,
Jiri Pirko351638e2013-05-28 01:30:21 +00004830 unsigned long event, void *ptr)
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004831{
Jiri Pirko351638e2013-05-28 01:30:21 +00004832 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004833 struct net *net = dev_net(dev);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004834
WANG Cong242d3a42017-05-08 10:12:13 -07004835 if (!(dev->flags & IFF_LOOPBACK))
4836 return NOTIFY_OK;
4837
4838 if (event == NETDEV_REGISTER) {
David Ahern421842e2018-04-17 17:33:18 -07004839 net->ipv6.fib6_null_entry->fib6_nh.nh_dev = dev;
Changli Gaod8d1f302010-06-10 23:31:35 -07004840 net->ipv6.ip6_null_entry->dst.dev = dev;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004841 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
4842#ifdef CONFIG_IPV6_MULTIPLE_TABLES
Changli Gaod8d1f302010-06-10 23:31:35 -07004843 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004844 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
Changli Gaod8d1f302010-06-10 23:31:35 -07004845 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004846 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
4847#endif
WANG Cong76da0702017-06-20 11:42:27 -07004848 } else if (event == NETDEV_UNREGISTER &&
4849 dev->reg_state != NETREG_UNREGISTERED) {
4850		/* NETDEV_UNREGISTER can be fired multiple times by
4851		 * netdev_wait_allrefs(); make sure we drop the references only once.
4852 */
Eric Dumazet12d94a82017-08-15 04:09:51 -07004853 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
WANG Cong242d3a42017-05-08 10:12:13 -07004854#ifdef CONFIG_IPV6_MULTIPLE_TABLES
Eric Dumazet12d94a82017-08-15 04:09:51 -07004855 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
4856 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
WANG Cong242d3a42017-05-08 10:12:13 -07004857#endif
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004858 }
4859
4860 return NOTIFY_OK;
4861}
4862
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863/*
4864 * /proc
4865 */
4866
4867#ifdef CONFIG_PROC_FS
4868
Alexey Dobriyan33120b32007-11-06 05:27:11 -08004869static const struct file_operations ipv6_route_proc_fops = {
Alexey Dobriyan33120b32007-11-06 05:27:11 -08004870 .open = ipv6_route_open,
4871 .read = seq_read,
4872 .llseek = seq_lseek,
Hannes Frederic Sowa8d2ca1d2013-09-21 16:55:59 +02004873 .release = seq_release_net,
Alexey Dobriyan33120b32007-11-06 05:27:11 -08004874};
4875
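/* /proc/net/rt6_stats: seven hex fields: FIB nodes, route nodes,
 * allocated routes, route entries, cached routes, in-use dst entries
 * and discarded routes.
 */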
Linus Torvalds1da177e2005-04-16 15:20:36 -07004876static int rt6_stats_seq_show(struct seq_file *seq, void *v)
4877{
Daniel Lezcano69ddb802008-03-04 13:46:23 -08004878 struct net *net = (struct net *)seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004879 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
Daniel Lezcano69ddb802008-03-04 13:46:23 -08004880 net->ipv6.rt6_stats->fib_nodes,
4881 net->ipv6.rt6_stats->fib_route_nodes,
Wei Wang81eb8442017-10-06 12:06:11 -07004882 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
Daniel Lezcano69ddb802008-03-04 13:46:23 -08004883 net->ipv6.rt6_stats->fib_rt_entries,
4884 net->ipv6.rt6_stats->fib_rt_cache,
Eric Dumazetfc66f952010-10-08 06:37:34 +00004885 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
Daniel Lezcano69ddb802008-03-04 13:46:23 -08004886 net->ipv6.rt6_stats->fib_discarded_routes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004887
4888 return 0;
4889}
4890
4891static int rt6_stats_seq_open(struct inode *inode, struct file *file)
4892{
Pavel Emelyanovde05c552008-07-18 04:07:21 -07004893 return single_open_net(inode, file, rt6_stats_seq_show);
Daniel Lezcano69ddb802008-03-04 13:46:23 -08004894}
4895
Arjan van de Ven9a321442007-02-12 00:55:35 -08004896static const struct file_operations rt6_stats_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004897 .open = rt6_stats_seq_open,
4898 .read = seq_read,
4899 .llseek = seq_lseek,
Pavel Emelyanovb6fcbdb2008-07-18 04:07:44 -07004900 .release = single_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004901};
4902#endif /* CONFIG_PROC_FS */
4903
4904#ifdef CONFIG_SYSCTL
4905
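/* net.ipv6.route.flush is write-only: writing to it triggers a fib6
 * garbage-collection run for the namespace.
 */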
Linus Torvalds1da177e2005-04-16 15:20:36 -07004906static
Joe Perchesfe2c6332013-06-11 23:04:25 -07004907int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004908 void __user *buffer, size_t *lenp, loff_t *ppos)
4909{
Lucian Adrian Grijincuc486da32011-02-24 19:48:03 +00004910 struct net *net;
4911 int delay;
4912 if (!write)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004913 return -EINVAL;
Lucian Adrian Grijincuc486da32011-02-24 19:48:03 +00004914
4915 net = (struct net *)ctl->extra1;
4916 delay = net->ipv6.sysctl.flush_delay;
4917 proc_dointvec(ctl, write, buffer, lenp, ppos);
Michal Kubeček2ac3ac82013-08-01 10:04:14 +02004918 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
Lucian Adrian Grijincuc486da32011-02-24 19:48:03 +00004919 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004920}
4921
Joe Perchesfe2c6332013-06-11 23:04:25 -07004922struct ctl_table ipv6_route_table_template[] = {
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09004923 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924 .procname = "flush",
Daniel Lezcano49905092008-01-10 03:01:01 -08004925 .data = &init_net.ipv6.sysctl.flush_delay,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004926 .maxlen = sizeof(int),
Dave Jones89c8b3a12005-04-28 12:11:49 -07004927 .mode = 0200,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08004928 .proc_handler = ipv6_sysctl_rtcache_flush
Linus Torvalds1da177e2005-04-16 15:20:36 -07004929 },
4930 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004931 .procname = "gc_thresh",
Daniel Lezcano9a7ec3a2008-03-04 13:48:53 -08004932 .data = &ip6_dst_ops_template.gc_thresh,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004933 .maxlen = sizeof(int),
4934 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08004935 .proc_handler = proc_dointvec,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004936 },
4937 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004938 .procname = "max_size",
Daniel Lezcano49905092008-01-10 03:01:01 -08004939 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004940 .maxlen = sizeof(int),
4941 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08004942 .proc_handler = proc_dointvec,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004943 },
4944 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004945 .procname = "gc_min_interval",
Daniel Lezcano49905092008-01-10 03:01:01 -08004946 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004947 .maxlen = sizeof(int),
4948 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08004949 .proc_handler = proc_dointvec_jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004950 },
4951 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004952 .procname = "gc_timeout",
Daniel Lezcano49905092008-01-10 03:01:01 -08004953 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004954 .maxlen = sizeof(int),
4955 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08004956 .proc_handler = proc_dointvec_jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004957 },
4958 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959 .procname = "gc_interval",
Daniel Lezcano49905092008-01-10 03:01:01 -08004960 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004961 .maxlen = sizeof(int),
4962 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08004963 .proc_handler = proc_dointvec_jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004964 },
4965 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004966 .procname = "gc_elasticity",
Daniel Lezcano49905092008-01-10 03:01:01 -08004967 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004968 .maxlen = sizeof(int),
4969 .mode = 0644,
Min Zhangf3d3f612010-08-14 22:42:51 -07004970 .proc_handler = proc_dointvec,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004971 },
4972 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004973 .procname = "mtu_expires",
Daniel Lezcano49905092008-01-10 03:01:01 -08004974 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975 .maxlen = sizeof(int),
4976 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08004977 .proc_handler = proc_dointvec_jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004978 },
4979 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004980 .procname = "min_adv_mss",
Daniel Lezcano49905092008-01-10 03:01:01 -08004981 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004982 .maxlen = sizeof(int),
4983 .mode = 0644,
Min Zhangf3d3f612010-08-14 22:42:51 -07004984 .proc_handler = proc_dointvec,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004985 },
4986 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004987 .procname = "gc_min_interval_ms",
Daniel Lezcano49905092008-01-10 03:01:01 -08004988 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004989 .maxlen = sizeof(int),
4990 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08004991 .proc_handler = proc_dointvec_ms_jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004992 },
Eric W. Biedermanf8572d82009-11-05 13:32:03 -08004993 { }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004994};
4995
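/* Clone the sysctl template for a namespace. The table[N] indices
 * below must stay in step with the entry order in
 * ipv6_route_table_template.
 */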
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00004996struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
Daniel Lezcano760f2d02008-01-10 02:53:43 -08004997{
4998 struct ctl_table *table;
4999
5000 table = kmemdup(ipv6_route_table_template,
5001 sizeof(ipv6_route_table_template),
5002 GFP_KERNEL);
YOSHIFUJI Hideaki5ee09102008-02-28 00:24:28 +09005003
5004 if (table) {
5005 table[0].data = &net->ipv6.sysctl.flush_delay;
Lucian Adrian Grijincuc486da32011-02-24 19:48:03 +00005006 table[0].extra1 = net;
Alexey Dobriyan86393e52009-08-29 01:34:49 +00005007 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
YOSHIFUJI Hideaki5ee09102008-02-28 00:24:28 +09005008 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
5009 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
5010 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
5011 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
5012 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
5013 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
5014 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
Alexey Dobriyan9c69fab2009-12-18 20:11:03 -08005015 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
Eric W. Biederman464dc802012-11-16 03:02:59 +00005016
5017 /* Don't export sysctls to unprivileged users */
5018 if (net->user_ns != &init_user_ns)
5019 table[0].procname = NULL;
YOSHIFUJI Hideaki5ee09102008-02-28 00:24:28 +09005020 }
5021
Daniel Lezcano760f2d02008-01-10 02:53:43 -08005022 return table;
5023}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005024#endif
5025
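/* Per-namespace init: clone the dst ops, allocate the special route
 * templates (fib6 null, null and, with CONFIG_IPV6_MULTIPLE_TABLES,
 * prohibit and blackhole entries) and seed the routing sysctl defaults.
 */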
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00005026static int __net_init ip6_route_net_init(struct net *net)
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005027{
Pavel Emelyanov633d424b2008-04-21 14:25:23 -07005028 int ret = -ENOMEM;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005029
Alexey Dobriyan86393e52009-08-29 01:34:49 +00005030 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
5031 sizeof(net->ipv6.ip6_dst_ops));
Benjamin Theryf2fc6a52008-03-04 13:49:23 -08005032
Eric Dumazetfc66f952010-10-08 06:37:34 +00005033 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
5034 goto out_ip6_dst_ops;
5035
David Ahern421842e2018-04-17 17:33:18 -07005036 net->ipv6.fib6_null_entry = kmemdup(&fib6_null_entry_template,
5037 sizeof(*net->ipv6.fib6_null_entry),
5038 GFP_KERNEL);
5039 if (!net->ipv6.fib6_null_entry)
5040 goto out_ip6_dst_entries;
5041
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005042 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
5043 sizeof(*net->ipv6.ip6_null_entry),
5044 GFP_KERNEL);
5045 if (!net->ipv6.ip6_null_entry)
David Ahern421842e2018-04-17 17:33:18 -07005046 goto out_fib6_null_entry;
Changli Gaod8d1f302010-06-10 23:31:35 -07005047 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
David S. Miller62fa8a82011-01-26 20:51:05 -08005048 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
5049 ip6_template_metrics, true);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005050
5051#ifdef CONFIG_IPV6_MULTIPLE_TABLES
Vincent Bernatfeca7d82017-08-08 20:23:49 +02005052 net->ipv6.fib6_has_custom_rules = false;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005053 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
5054 sizeof(*net->ipv6.ip6_prohibit_entry),
5055 GFP_KERNEL);
Peter Zijlstra68fffc62008-10-07 14:12:10 -07005056 if (!net->ipv6.ip6_prohibit_entry)
5057 goto out_ip6_null_entry;
Changli Gaod8d1f302010-06-10 23:31:35 -07005058 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
David S. Miller62fa8a82011-01-26 20:51:05 -08005059 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
5060 ip6_template_metrics, true);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005061
5062 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
5063 sizeof(*net->ipv6.ip6_blk_hole_entry),
5064 GFP_KERNEL);
Peter Zijlstra68fffc62008-10-07 14:12:10 -07005065 if (!net->ipv6.ip6_blk_hole_entry)
5066 goto out_ip6_prohibit_entry;
Changli Gaod8d1f302010-06-10 23:31:35 -07005067 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
David S. Miller62fa8a82011-01-26 20:51:05 -08005068 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
5069 ip6_template_metrics, true);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005070#endif
5071
Peter Zijlstrab339a47c2008-10-07 14:15:00 -07005072 net->ipv6.sysctl.flush_delay = 0;
5073 net->ipv6.sysctl.ip6_rt_max_size = 4096;
5074 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
5075 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
5076 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
5077 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
5078 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
5079 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
5080
Benjamin Thery6891a342008-03-04 13:49:47 -08005081 net->ipv6.ip6_rt_gc_expire = 30*HZ;
5082
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005083 ret = 0;
5084out:
5085 return ret;
Benjamin Theryf2fc6a52008-03-04 13:49:23 -08005086
Peter Zijlstra68fffc62008-10-07 14:12:10 -07005087#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5088out_ip6_prohibit_entry:
5089 kfree(net->ipv6.ip6_prohibit_entry);
5090out_ip6_null_entry:
5091 kfree(net->ipv6.ip6_null_entry);
5092#endif
David Ahern421842e2018-04-17 17:33:18 -07005093out_fib6_null_entry:
5094 kfree(net->ipv6.fib6_null_entry);
Eric Dumazetfc66f952010-10-08 06:37:34 +00005095out_ip6_dst_entries:
5096 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
Benjamin Theryf2fc6a52008-03-04 13:49:23 -08005097out_ip6_dst_ops:
Benjamin Theryf2fc6a52008-03-04 13:49:23 -08005098 goto out;
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005099}
5100
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00005101static void __net_exit ip6_route_net_exit(struct net *net)
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005102{
David Ahern421842e2018-04-17 17:33:18 -07005103 kfree(net->ipv6.fib6_null_entry);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005104 kfree(net->ipv6.ip6_null_entry);
5105#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5106 kfree(net->ipv6.ip6_prohibit_entry);
5107 kfree(net->ipv6.ip6_blk_hole_entry);
5108#endif
Xiaotian Feng41bb78b2010-11-02 16:11:05 +00005109 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005110}
5111
Thomas Grafd1896342012-06-18 12:08:33 +00005112static int __net_init ip6_route_net_init_late(struct net *net)
5113{
5114#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00005115 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
Joe Perchesd6444062018-03-23 15:54:38 -07005116 proc_create("rt6_stats", 0444, net->proc_net, &rt6_stats_seq_fops);
Thomas Grafd1896342012-06-18 12:08:33 +00005117#endif
5118 return 0;
5119}
5120
5121static void __net_exit ip6_route_net_exit_late(struct net *net)
5122{
5123#ifdef CONFIG_PROC_FS
Gao fengece31ff2013-02-18 01:34:56 +00005124 remove_proc_entry("ipv6_route", net->proc_net);
5125 remove_proc_entry("rt6_stats", net->proc_net);
Thomas Grafd1896342012-06-18 12:08:33 +00005126#endif
5127}
5128
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005129static struct pernet_operations ip6_route_net_ops = {
5130 .init = ip6_route_net_init,
5131 .exit = ip6_route_net_exit,
5132};
5133
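/* Per-namespace inet_peer base, used e.g. for ICMPv6 rate limiting
 * state.
 */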
David S. Millerc3426b42012-06-09 16:27:05 -07005134static int __net_init ipv6_inetpeer_init(struct net *net)
5135{
5136 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
5137
5138 if (!bp)
5139 return -ENOMEM;
5140 inet_peer_base_init(bp);
5141 net->ipv6.peers = bp;
5142 return 0;
5143}
5144
5145static void __net_exit ipv6_inetpeer_exit(struct net *net)
5146{
5147 struct inet_peer_base *bp = net->ipv6.peers;
5148
5149 net->ipv6.peers = NULL;
David S. Miller56a6b242012-06-09 16:32:41 -07005150 inetpeer_invalidate_tree(bp);
David S. Millerc3426b42012-06-09 16:27:05 -07005151 kfree(bp);
5152}
5153
David S. Miller2b823f72012-06-09 19:00:16 -07005154static struct pernet_operations ipv6_inetpeer_ops = {
David S. Millerc3426b42012-06-09 16:27:05 -07005155 .init = ipv6_inetpeer_init,
5156 .exit = ipv6_inetpeer_exit,
5157};
5158
Thomas Grafd1896342012-06-18 12:08:33 +00005159static struct pernet_operations ip6_route_net_late_ops = {
5160 .init = ip6_route_net_init_late,
5161 .exit = ip6_route_net_exit_late,
5162};
5163
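/* Lower priority than addrconf's notifier so this runs after it and
 * the loopback idev already exists when the special entries take a
 * reference to it.
 */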
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005164static struct notifier_block ip6_route_dev_notifier = {
5165 .notifier_call = ip6_route_dev_notify,
WANG Cong242d3a42017-05-08 10:12:13 -07005166 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005167};
5168
WANG Cong2f460932017-05-03 22:07:31 -07005169void __init ip6_route_init_special_entries(void)
5170{
5171 /* Registering of the loopback is done before this portion of code,
5172 * the loopback reference in rt6_info will not be taken, do it
5173 * manually for init_net */
David Ahern421842e2018-04-17 17:33:18 -07005174 init_net.ipv6.fib6_null_entry->fib6_nh.nh_dev = init_net.loopback_dev;
WANG Cong2f460932017-05-03 22:07:31 -07005175 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
5176 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
5177 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
5178 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
5179 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
5180 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
5181 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
5182 #endif
5183}
5184
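/* Module init: set up the dst kmem cache and blackhole dst counters,
 * register the pernet subsystems, fib6 core, xfrm6 and policy rules,
 * the rtnetlink route handlers and the netdevice notifier, and
 * initialise the per-cpu uncached route lists. Errors unwind in
 * reverse order.
 */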
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005185int __init ip6_route_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186{
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005187 int ret;
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -07005188 int cpu;
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005189
Daniel Lezcano9a7ec3a2008-03-04 13:48:53 -08005190 ret = -ENOMEM;
5191 ip6_dst_ops_template.kmem_cachep =
5192 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
5193 SLAB_HWCACHE_ALIGN, NULL);
5194 if (!ip6_dst_ops_template.kmem_cachep)
Fernando Carrijoc19a28e2009-01-07 18:09:08 -08005195 goto out;
David S. Miller14e50e52007-05-24 18:17:54 -07005196
Eric Dumazetfc66f952010-10-08 06:37:34 +00005197 ret = dst_entries_init(&ip6_dst_blackhole_ops);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005198 if (ret)
Daniel Lezcanobdb32892008-03-04 13:48:10 -08005199 goto out_kmem_cache;
Daniel Lezcanobdb32892008-03-04 13:48:10 -08005200
David S. Millerc3426b42012-06-09 16:27:05 -07005201 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
5202 if (ret)
David S. Millere8803b62012-06-16 01:12:19 -07005203 goto out_dst_entries;
Thomas Graf2a0c4512012-06-14 23:00:17 +00005204
David S. Miller7e52b332012-06-15 15:51:55 -07005205 ret = register_pernet_subsys(&ip6_route_net_ops);
5206 if (ret)
5207 goto out_register_inetpeer;
David S. Millerc3426b42012-06-09 16:27:05 -07005208
Arnaud Ebalard5dc121e2008-10-01 02:37:56 -07005209 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
5210
David S. Millere8803b62012-06-16 01:12:19 -07005211 ret = fib6_init();
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005212 if (ret)
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005213 goto out_register_subsys;
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005214
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005215 ret = xfrm6_init();
5216 if (ret)
David S. Millere8803b62012-06-16 01:12:19 -07005217 goto out_fib6_init;
Daniel Lezcanoc35b7e72007-12-08 00:14:11 -08005218
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005219 ret = fib6_rules_init();
5220 if (ret)
5221 goto xfrm6_init;
Daniel Lezcano7e5449c2007-12-08 00:14:54 -08005222
Thomas Grafd1896342012-06-18 12:08:33 +00005223 ret = register_pernet_subsys(&ip6_route_net_late_ops);
5224 if (ret)
5225 goto fib6_rules_init;
5226
Florian Westphal16feebc2017-12-02 21:44:08 +01005227 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
5228 inet6_rtm_newroute, NULL, 0);
5229 if (ret < 0)
5230 goto out_register_late_subsys;
5231
5232 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
5233 inet6_rtm_delroute, NULL, 0);
5234 if (ret < 0)
5235 goto out_register_late_subsys;
5236
5237 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
5238 inet6_rtm_getroute, NULL,
5239 RTNL_FLAG_DOIT_UNLOCKED);
5240 if (ret < 0)
Thomas Grafd1896342012-06-18 12:08:33 +00005241 goto out_register_late_subsys;
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005242
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005243 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005244 if (ret)
Thomas Grafd1896342012-06-18 12:08:33 +00005245 goto out_register_late_subsys;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005246
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -07005247 for_each_possible_cpu(cpu) {
5248 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
5249
5250 INIT_LIST_HEAD(&ul->head);
5251 spin_lock_init(&ul->lock);
5252 }
5253
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005254out:
5255 return ret;
5256
Thomas Grafd1896342012-06-18 12:08:33 +00005257out_register_late_subsys:
Florian Westphal16feebc2017-12-02 21:44:08 +01005258 rtnl_unregister_all(PF_INET6);
Thomas Grafd1896342012-06-18 12:08:33 +00005259 unregister_pernet_subsys(&ip6_route_net_late_ops);
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005260fib6_rules_init:
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005261 fib6_rules_cleanup();
5262xfrm6_init:
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005263 xfrm6_fini();
Thomas Graf2a0c4512012-06-14 23:00:17 +00005264out_fib6_init:
5265 fib6_gc_cleanup();
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005266out_register_subsys:
5267 unregister_pernet_subsys(&ip6_route_net_ops);
David S. Miller7e52b332012-06-15 15:51:55 -07005268out_register_inetpeer:
5269 unregister_pernet_subsys(&ipv6_inetpeer_ops);
Eric Dumazetfc66f952010-10-08 06:37:34 +00005270out_dst_entries:
5271 dst_entries_destroy(&ip6_dst_blackhole_ops);
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005272out_kmem_cache:
Benjamin Theryf2fc6a52008-03-04 13:49:23 -08005273 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005274 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005275}
5276
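/* Tear down everything ip6_route_init() set up, roughly in reverse
 * registration order.
 */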
5277void ip6_route_cleanup(void)
5278{
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005279 unregister_netdevice_notifier(&ip6_route_dev_notifier);
Thomas Grafd1896342012-06-18 12:08:33 +00005280 unregister_pernet_subsys(&ip6_route_net_late_ops);
Thomas Graf101367c2006-08-04 03:39:02 -07005281 fib6_rules_cleanup();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282 xfrm6_fini();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005283 fib6_gc_cleanup();
David S. Millerc3426b42012-06-09 16:27:05 -07005284 unregister_pernet_subsys(&ipv6_inetpeer_ops);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005285 unregister_pernet_subsys(&ip6_route_net_ops);
Xiaotian Feng41bb78b2010-11-02 16:11:05 +00005286 dst_entries_destroy(&ip6_dst_blackhole_ops);
Benjamin Theryf2fc6a52008-03-04 13:49:23 -08005287 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005288}