/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define vrf_master_get_rcu(dev) \
	((struct net_device *)rcu_dereference(dev->rx_handler_data))

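/* Per-VRF private data: rth and rt6 are preallocated dst entries whose
 * output handlers (vrf_output/vrf_output6) steer locally generated
 * traffic back through the VRF device; tb_id is the routing table
 * associated with this VRF.
 */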
struct net_vrf {
	struct rtable		*rth;
	struct rt6_info		*rt6;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
#if IS_ENABLED(CONFIG_IPV6)
static bool check_ipv6_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct ipv6hdr _ipv6h;
	bool rc = true;

	ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		goto out;

	if (ipv6h->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(_ipv6h),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = false;
			break;
		}
	}

out:
	return rc;
}
#else
static bool check_ipv6_frame(const struct sk_buff *skb)
{
	return false;
}
#endif

static bool is_ip_rx_frame(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return true;
	case htons(ETH_P_IPV6):
		return check_ipv6_frame(skb);
	}
	return false;
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

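/* Receive handler registered on each slave device (see do_vrf_add_slave).
 * For IP/IPv6 frames it accounts the packet to the VRF device, flips
 * skb->dev to the VRF master and returns RX_HANDLER_ANOTHER so the
 * stack re-runs delivery against the VRF device.
 */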
/* note: already called with rcu_read_lock */
static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (is_ip_rx_frame(skb)) {
		struct net_device *dev = vrf_master_get_rcu(skb->dev);
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->rx_pkts++;
		dstats->rx_bytes += skb->len;
		u64_stats_update_end(&dstats->syncp);

		skb->dev = dev;

		return RX_HANDLER_ANOTHER;
	}
	return RX_HANDLER_PASS;
}

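/* Aggregate the per-CPU counters; the u64_stats fetch/retry loop gives a
 * consistent snapshot of each CPU's counters on 32-bit hosts.
 */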
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}

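/* Transmit path: packets queued on the VRF device are routed here using
 * the VRF device as the oif so that l3mdev FIB rules direct the lookup
 * to the table associated with the VRF.
 */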
#if IS_ENABLED(CONFIG_IPV6)
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	ret = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

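/* IPv4 transmit: look up a route in the VRF table (again keyed by the
 * VRF device's ifindex) and attach it to the skb before handing the
 * packet to ip_local_out().
 */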
static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
			    struct net_device *vrf_dev)
{
	struct rtable *rt;
	int err = 1;

	rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
	if (IS_ERR(rt))
		goto out;

	/* TO-DO: what about broadcast ? */
	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto out;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	err = 0;
out:
	return err;
}

static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
				FLOWI_FLAG_SKIP_NH_OIF,
		.daddr = ip4h->daddr,
	};

	if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
		goto err;

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

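/* ndo_start_xmit: dispatch by protocol and account the result against
 * the per-CPU TX counters (tx_drps for anything that was not accepted).
 */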
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += skb->len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

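/* The dst entries preallocated in vrf_rt6_create()/vrf_rtable_create()
 * below have their output handlers pointed at vrf_output6()/vrf_output().
 * Locally generated packets whose lookup returned one of these dsts run
 * through the POST_ROUTING hook and are then queued on the VRF device
 * itself, so they re-enter vrf_xmit() above for the lookup in the VRF
 * table.
 */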
#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

static void vrf_rt6_release(struct net_vrf *vrf)
{
	dst_release(&vrf->rt6->dst);
	vrf->rt6 = NULL;
}

static int vrf_rt6_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	rt6 = ip6_dst_alloc(net, dev,
			    DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
	if (!rt6)
		goto out;

	dst_hold(&rt6->dst);

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output	= vrf_output6;
	vrf->rt6 = rt6;
	rc = 0;
out:
	return rc;
}
#else
static void vrf_rt6_release(struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh))
		ret = dst_neigh_output(dst, neigh, skb);

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);
	return ret;
}

static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

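/* Preallocated IPv4 dst for the VRF device.  vrf_rtable_create() also
 * ensures the FIB table for this VRF exists (fib_new_table) and records
 * the table id in the dst's rt_table_id.
 */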
static void vrf_rtable_release(struct net_vrf *vrf)
{
	struct dst_entry *dst = (struct dst_entry *)vrf->rth;

	dst_release(dst);
	vrf->rth = NULL;
}

static struct rtable *vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return NULL;

	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (rth) {
		rth->dst.output	= vrf_output;
		rth->rt_table_id = vrf->tb_id;
	}

	return rth;
}

/**************************** device handling ********************/

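/* Enslaving a device to a VRF registers vrf_handle_frame() as its
 * rx_handler, links it as a lower device of the VRF master, marks it
 * IFF_L3MDEV_SLAVE and cycles it down/up so its addresses and routes
 * move to the VRF's table.  do_vrf_del_slave() undoes all of this.
 */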
/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	int ret;

	/* register the packet handler for slave ports */
	ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
	if (ret) {
		netdev_err(port_dev,
			   "Device %s failed to register rx_handler\n",
			   port_dev->name);
		goto out_fail;
	}

	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
	if (ret < 0)
		goto out_unregister;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	cycle_netdev(port_dev);

	return 0;

out_unregister:
	netdev_rx_handler_unregister(port_dev);
out_fail:
	return ret;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	netdev_rx_handler_unregister(port_dev);

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_device *port_dev;
	struct list_head *iter;

	vrf_rtable_release(vrf);
	vrf_rt6_release(vrf);

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

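/* ndo_init: allocate the per-CPU stats and the default IPv4/IPv6 dsts
 * that point back at this VRF device.
 */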
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	vrf->rth = vrf_rtable_create(dev);
	if (!vrf->rth)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	return 0;

out_rth:
	vrf_rtable_release(vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static struct rtable *vrf_get_rtable(const struct net_device *dev,
				     const struct flowi4 *fl4)
{
	struct rtable *rth = NULL;

	if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);

		rth = vrf->rth;
		dst_hold(&rth->dst);
	}

	return rth;
}

/* called under rcu_read_lock */
static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
{
	struct fib_result res = { .tclassid = 0 };
	struct net *net = dev_net(dev);
	u32 orig_tos = fl4->flowi4_tos;
	u8 flags = fl4->flowi4_flags;
	u8 scope = fl4->flowi4_scope;
	u8 tos = RT_FL_TOS(fl4);
	int rc;

	if (unlikely(!fl4->daddr))
		return 0;

	fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	/* make sure oif is set to VRF device for lookup */
	fl4->flowi4_oif = dev->ifindex;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			     RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rc = fib_lookup(net, fl4, &res, 0);
	if (!rc) {
		if (res.type == RTN_LOCAL)
			fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr;
		else
			fib_select_path(net, &res, fl4, -1);
	}

	fl4->flowi4_flags = flags;
	fl4->flowi4_tos = orig_tos;
	fl4->flowi4_scope = scope;

	return rc;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
					 const struct flowi6 *fl6)
{
	struct rt6_info *rt = NULL;

	if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);

		rt = vrf->rt6;
		dst_hold(&rt->dst);
	}

	return (struct dst_entry *)rt;
}
#endif

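/* Hooks used by the l3mdev core: l3mdev_fib_table supplies the table id
 * for FIB rule lookups, l3mdev_get_rtable/l3mdev_get_rt6_dst return the
 * preallocated dsts for flows directed at the VRF, and l3mdev_get_saddr
 * performs source address selection within the VRF table.
 */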
static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_get_rtable	= vrf_get_rtable,
	.l3mdev_get_saddr	= vrf_get_saddr,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_get_rt6_dst	= vrf_get_rt6_dst,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

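/* Creating a VRF requires the IFLA_VRF_TABLE attribute; with iproute2
 * this is, for example:
 *
 *	ip link add vrf-blue type vrf table 10
 *	ip link set dev eth1 master vrf-blue
 *
 * (the table number and device names above are illustrative.)
 */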
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	return register_netdevice(dev);
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size	= vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto error;

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);