/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

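/* Per-VRF private state: rth and rt6 are the preallocated dst entries
 * that steer output traffic back through this device; tb_id is the
 * routing table associated with the VRF.
 */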
struct net_vrf {
	struct rtable __rcu	*rth;
	struct rt6_info	__rcu	*rt6;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

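/* Aggregate the per-CPU counters into rtnl_link_stats64.  The
 * u64_stats_fetch_begin_irq/retry_irq pair rereads a CPU's counters if
 * a writer updated them mid-read, keeping 64-bit values consistent on
 * 32-bit hosts.
 */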
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}

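/* Transmit path for locally generated IPv6 packets sent through the
 * VRF device: route the packet in the VRF's table (oif pins the lookup
 * to this device), attach the resulting dst, strip the ethernet header
 * that was added for the pass-through device and hand the packet to
 * ip6_local_out().
 */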
#if IS_ENABLED(CONFIG_IPV6)
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

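/* IPv4 analogue of the IPv6 path above: route in the VRF table, reject
 * anything that is not unicast or local, fill in a source address if
 * the sender left it zero, then call ip_local_out().
 */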
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
				FLOWI_FLAG_SKIP_NH_OIF,
		.daddr = ip4h->daddr,
	};
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

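/* ndo_start_xmit handler: dispatch by protocol and account the result
 * in the per-CPU stats (bytes/packets on success, tx_drps otherwise).
 */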
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* cache len before xmit: on success the skb belongs to the stack
	 * and must not be dereferenced afterwards
	 */
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* holding rtnl */
static void vrf_rt6_release(struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);

	rcu_assign_pointer(vrf->rt6, NULL);

	if (rt6)
		dst_release(&rt6->dst);
}

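/* Allocate the per-VRF IPv6 dst at link creation: make sure the FIB
 * table exists, allocate an uncached rt6_info tied to this device and
 * point its output handler at vrf_output6.
 */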
static int vrf_rt6_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	rt6 = ip6_dst_alloc(net, dev,
			    DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
	if (!rt6)
		goto out;

	dst_hold(&rt6->dst);

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output	= vrf_output6;
	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static void vrf_rt6_release(struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh))
		ret = dst_neigh_output(dst, neigh, skb);

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);
	return ret;
}

static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* holding rtnl */
static void vrf_rtable_release(struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);

	rcu_assign_pointer(vrf->rth, NULL);

	if (rth)
		dst_release(&rth->dst);
}

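/* IPv4 counterpart of vrf_rt6_create(): ensure the FIB table exists
 * and install a preallocated rtable whose output handler is
 * vrf_output.
 */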
static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	rth->dst.output	= vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);

	return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

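/* Enslave a device to the VRF: link it as a lower device, mark it as
 * an L3 slave and cycle it so its routes move into the VRF table.
 */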
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	int ret;

	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
	if (ret < 0)
		return ret;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	cycle_netdev(port_dev);

	return 0;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_device *port_dev;
	struct list_head *iter;

	vrf_rtable_release(vrf);
	vrf_rt6_release(vrf);

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

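/* ndo_init: allocate per-CPU stats and the IPv4/IPv6 dst entries that
 * point back at this device; unwind in reverse order on failure.
 */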
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;

	return 0;

out_rth:
	vrf_rtable_release(vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

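/* l3mdev hook: hand out a reference to the VRF's preallocated IPv4
 * dst unless the flow was already directed at an l3mdev source.
 */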
static struct rtable *vrf_get_rtable(const struct net_device *dev,
				     const struct flowi4 *fl4)
{
	struct rtable *rth = NULL;

	if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);

		rcu_read_lock();

		rth = rcu_dereference(vrf->rth);
		if (likely(rth))
			dst_hold(&rth->dst);

		rcu_read_unlock();
	}

	return rth;
}

/* called under rcu_read_lock */
static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
{
	struct fib_result res = { .tclassid = 0 };
	struct net *net = dev_net(dev);
	u32 orig_tos = fl4->flowi4_tos;
	u8 flags = fl4->flowi4_flags;
	u8 scope = fl4->flowi4_scope;
	u8 tos = RT_FL_TOS(fl4);
	int rc;

	if (unlikely(!fl4->daddr))
		return 0;

	fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	/* make sure oif is set to VRF device for lookup */
	fl4->flowi4_oif = dev->ifindex;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			     RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rc = fib_lookup(net, fl4, &res, 0);
	if (!rc) {
		if (res.type == RTN_LOCAL)
			fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr;
		else
			fib_select_path(net, &res, fl4, -1);
	}

	fl4->flowi4_flags = flags;
	fl4->flowi4_tos = orig_tos;
	fl4->flowi4_scope = scope;

	return rc;
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	/* if packet is NDISC keep the ingress interface */
	if (!ipv6_ndisc_frame(skb)) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;

	skb_push(skb, skb->mac_len);
	dev_queue_xmit_nit(skb, vrf_dev);
	skb_pull(skb, skb->mac_len);

	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
					 const struct flowi6 *fl6)
{
	struct dst_entry *dst = NULL;

	if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);
		struct rt6_info *rt;

		rcu_read_lock();

		rt = rcu_dereference(vrf->rt6);
		if (likely(rt)) {
			dst = &rt->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();
	}

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_get_rtable	= vrf_get_rtable,
	.l3mdev_get_saddr	= vrf_get_saddr,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_get_rt6_dst	= vrf_get_rt6_dst,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

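/* rtnl_link setup: initialize as an ethernet-style device with a
 * random MAC, lockless transmit (LLTX) and pinned to its network
 * namespace.
 */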
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

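/* Create a VRF device.  IFLA_VRF_TABLE is mandatory; from userspace,
 * e.g. with iproute2:
 *
 *	ip link add vrf-blue type vrf table 10
 *	ip link set dev eth1 master vrf-blue
 */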
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	return register_netdevice(dev);
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));  /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

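/* Netdevice notifier: when an enslaved device is unregistered, drop
 * the VRF's reference to it so the unregister can complete.
 */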
static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto error;

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);