/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define FIB_RULE_PREF  1000	/* default preference for FIB rules */
static bool add_fib_rules = true;

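/* per-VRF device state: cached dst entries used to steer locally
 * generated traffic out of the VRF (rth/rt6) or back in for local
 * delivery (rth_local/rt6_local), plus the routing table id bound
 * to this VRF
 */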
struct net_vrf {
	struct rtable __rcu	*rth;
	struct rtable __rcu	*rth_local;
	struct rt6_info	__rcu	*rt6;
	struct rt6_info	__rcu	*rt6_local;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};

static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

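/* fold the per-cpu dstats counters into the rtnl_link_stats64 result;
 * the u64_stats sequence counter keeps the 64-bit reads consistent on
 * 32-bit systems
 */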
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}

/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);
	skb_dst_force(skb);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (dst->dev == net->loopback_dev || dst->dev == dev) {
		struct net_vrf *vrf = netdev_priv(dev);
		struct rt6_info *rt6_local;

		/* release looked up dst and use cached local dst */
		dst_release(dst);

		rcu_read_lock();

		rt6_local = rcu_dereference(vrf->rt6_local);
		if (unlikely(!rt6_local)) {
			rcu_read_unlock();
			goto err;
		}

		/* Ordering issue: cached local dst is created on newlink
		 * before the IPv6 initialization. Using the local dst
		 * requires rt6i_idev to be set so make sure it is.
		 */
		if (unlikely(!rt6_local->rt6i_idev)) {
			rt6_local->rt6i_idev = in6_dev_get(dev);
			if (!rt6_local->rt6i_idev) {
				rcu_read_unlock();
				goto err;
			}
		}

		dst = &rt6_local->dst;
		dst_hold(dst);

		rcu_read_unlock();

		return vrf_local_xmit(skb, dev, &rt6_local->dst);
	}

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
		.daddr = ip4h->daddr,
	};
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
		struct net_vrf *vrf = netdev_priv(vrf_dev);
		struct rtable *rth_local;
		struct dst_entry *dst = NULL;

		ip_rt_put(rt);

		rcu_read_lock();

		rth_local = rcu_dereference(vrf->rth_local);
		if (likely(rth_local)) {
			dst = &rth_local->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();

		if (unlikely(!dst))
			goto err;

		return vrf_local_xmit(skb, vrf_dev, dst);
	}

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

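/* dispatch a packet transmitted on the VRF device to the IPv4 or IPv6
 * output path based on its L3 protocol; anything else is dropped
 */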
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += skb->len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	nf_reset(skb);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	RCU_INIT_POINTER(vrf->rt6_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rt6_local) {
		if (rt6_local->rt6i_idev)
			in6_dev_put(rt6_local->rt6i_idev);

		dst = &rt6_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6, *rt6_local;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	dst_hold(&rt6->dst);

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output	= vrf_output6;

	/* create a dst for local routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rt6_local = ip6_dst_alloc(net, dev, flags);
	if (!rt6_local) {
		dst_release(&rt6->dst);
		goto out;
	}

	dst_hold(&rt6_local->dst);

	rt6_local->rt6i_idev  = in6_dev_get(dev);
	rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL;
	rt6_local->rt6i_table = rt6i_table;
	rt6_local->dst.input  = ip6_input;

	rcu_assign_pointer(vrf->rt6, rt6);
	rcu_assign_pointer(vrf->rt6_local, rt6_local);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	nf_reset(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh))
		ret = dst_neigh_output(dst, neigh, skb);

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);
	return ret;
}

static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}

/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct rtable *rth_local = rtnl_dereference(vrf->rth_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	RCU_INIT_POINTER(vrf->rth_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rth_local) {
		dst = &rth_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth, *rth_local;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	/* create a dst for local ingress routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0);
	if (!rth_local) {
		dst_release(&rth->dst);
		return -ENOMEM;
	}

	rth->dst.output	= vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rth_local->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);
	rcu_assign_pointer(vrf->rth_local, rth_local);

	return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	int ret;

	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
	if (ret < 0)
		return ret;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	cycle_netdev(port_dev);

	return 0;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

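/* drop the cached dst entries, release all enslaved devices and free
 * the per-cpu stats when the VRF device is torn down
 */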
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_device *port_dev;
	struct list_head *iter;

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

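/* allocate the per-cpu stats and create the default IPv4/IPv6 dst
 * entries that point traffic back at this device
 */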
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return 0;
}

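/* run the packet through the given netfilter hook with the VRF device
 * as the input interface; on a drop verdict the skb is freed by the
 * netfilter core and NULL is returned to the caller
 */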
static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
		skb = NULL;    /* kfree_skb(skb) handled by nf code */

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

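/* IPv6 route lookup against the FIB table bound to this VRF; the
 * table pointer is taken from the cached rt6 dst under RCU
 */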
static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct fib6_table *table = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	/* fib6_table does not have a refcnt and can not be freed */
	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6))
		table = rt6->rt6i_table;

	rcu_read_unlock();

	if (!table)
		return NULL;

	return ip6_pol_route(net, table, ifindex, fl6, flags);
}

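/* for destinations that must keep their original ingress device
 * (link-local and multicast), resolve the dst in the VRF table using
 * the original ifindex and attach it to the skb here
 */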
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.daddr		= iph->daddr,
		.saddr		= iph->saddr,
		.flowlabel	= ip6_flowinfo(iph),
		.flowi6_mark	= skb->mark,
		.flowi6_proto	= iph->nexthdr,
		.flowi6_iif	= ifindex,
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC or addressed to multicast or link-local
	 * then keep the ingress interface
	 */
	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	if (!ipv6_ndisc_frame(skb) && !need_strict) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	skb_push(skb, skb->mac_len);
	dev_queue_xmit_nit(skb, vrf_dev);
	skb_pull(skb, skb->mac_len);

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */

	return sz;
}

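/* add or remove the FIB rule for one address family by building a
 * fib_rule_hdr netlink message with FRA_L3MDEV and FRA_PRIORITY
 * attributes and handing it to fib_nl_newrule()/fib_nl_delrule();
 * the l3mdev rule steers lookups for VRF traffic to the VRF's table
 */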
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if (family == AF_INET6 && !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags &= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u32(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}

static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

	return 0;

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}

static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features   |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features   |= NETIF_F_GSO_SOFTWARE;
	dev->features   |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
	dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

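/* create a VRF device and bind it to its routing table. Typical usage
 * from user space (e.g. with iproute2) is along the lines of:
 *
 *     ip link add vrf-blue type vrf table 10
 *     ip link set dev eth0 master vrf-blue
 *
 * The first VRF created also installs the l3mdev FIB rules; this is
 * done only once, tracked by add_fib_rules.
 */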
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);
	int err;

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	if (add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			unregister_netdevice(dev);
			goto out;
		}
		add_fib_rules = false;
	}

out:
	return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto error;

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);