/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * TODO
 *  - use IANA UDP port number (when defined)
 *  - IPv6 (not in RFC)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define VXLAN_VERSION	"0.1"

#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
/* VLAN + IP header + UDP + VXLAN */
#define VXLAN_HEADROOM (4 + 20 + 8 + 8)

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */

/* VXLAN protocol header */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};
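
/* On the wire (per the VXLAN draft) the 8-byte header looks like:
 *
 *	|R|R|R|R|I|R|R|R|      Reserved (24 bits)     |
 *	|          VNI (24 bits)         |  Reserved  |
 *
 * Only the I flag must be set (0x08 in the first byte, hence the
 * VXLAN_FLAGS value above).  The VNI occupies the upper 24 bits of
 * vx_vni, which is why the receive path extracts it with
 * ntohl(vx_vni) >> 8 and the transmit path stores htonl(vni << 8).
 */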

/* UDP port for VXLAN traffic. */
static unsigned int vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, uint, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
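
/* Note: 8472 is the pre-IANA default used by early deployments; the
 * port can be overridden when loading the module, e.g. (illustrative):
 *
 *	modprobe vxlan udp_port=4789
 *
 * 4789 being the UDP port IANA later assigned to VXLAN.
 */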

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* per-net private data for this module */
static unsigned int vxlan_net_id;
struct vxlan_net {
	struct socket *sock;	/* UDP encap socket */
	struct hlist_head vni_list[VNI_HASH_SIZE];
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head rcu;
	unsigned long updated;		/* jiffies */
	unsigned long used;
	__be32 remote_ip;
	u16 state;			/* see ndm_state */
	u8 eth_addr[ETH_ALEN];
};

/* Per-cpu network traffic stats */
struct vxlan_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
};

/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;
	struct net_device *dev;
	struct vxlan_stats __percpu *stats;
	__u32 vni;		/* virtual network id */
	__be32 gaddr;		/* multicast group */
	__be32 saddr;		/* source address */
	unsigned int link;	/* link to multicast over */
	__u8 tos;		/* TOS override */
	__u8 ttl;
	bool learn;

	unsigned long age_interval;
	struct timer_list age_timer;
	spinlock_t hash_lock;
	unsigned int addrcnt;
	unsigned int addrmax;
	unsigned int addrexceeded;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};

/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline struct hlist_head *vni_head(struct net *net, u32 id)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
{
	struct vxlan_dev *vxlan;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
		if (vxlan->vni == id)
			return vxlan;
	}

	return NULL;
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = NDA_DST;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (nla_put_be32(skb, NDA_DST, fdb->remote_ip))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(__be32)) /* NDA_DST */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     const struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(f, node, head, hlist) {
		if (compare_ether_addr(mac, f->eth_addr) == 0)
			return f;
	}

	return NULL;
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, __be32 ip,
			    __u16 state, __u16 flags)
{
	struct vxlan_fdb *f;
	int notify = 0;

	f = vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->remote_ip = ip;
		f->state = state;
		f->updated = f->used = jiffies;
		memcpy(f->eth_addr, mac, ETH_ALEN);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify)
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

	return 0;
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	kfree_rcu(f, rcu);
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__be32 ip;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	if (nla_len(tb[NDA_DST]) != sizeof(__be32))
		return -EAFNOSUPPORT;

	ip = nla_get_be32(tb[NDA_DST]);

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (f) {
		vxlan_fdb_destroy(vxlan, f);
		err = 0;
	}
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		struct hlist_node *n;
		int err;

		hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
			if (idx < cb->args[0])
				goto skip;

			err = vxlan_fdb_info(skb, vxlan, f,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH,
					     NLM_F_MULTI);
			if (err < 0)
				break;
skip:
			++idx;
		}
	}

	return idx;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 */
static void vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		f->used = jiffies;
		if (likely(f->remote_ip == src_ip))
			return;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pI4 to %pI4\n",
				    src_mac, &f->remote_ip, &src_ip);

		f->remote_ip = src_ip;
		f->updated = jiffies;
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);
		err = vxlan_fdb_create(vxlan, src_mac, src_ip,
				       NUD_REACHABLE,
				       NLM_F_EXCL|NLM_F_CREATE);
		spin_unlock(&vxlan->hash_lock);
	}
}
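
/* For example (hypothetical addresses): a frame whose inner source MAC
 * is 52:54:00:12:34:56, arriving in a tunnel from 192.0.2.1, installs
 * or refreshes the entry 52:54:00:12:34:56 -> 192.0.2.1; later frames
 * to that MAC are then unicast to 192.0.2.1 instead of being flooded
 * to the multicast group.
 */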

/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn,
			     const struct vxlan_dev *this)
{
	const struct vxlan_dev *vxlan;
	struct hlist_node *node;
	unsigned h;

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
			if (vxlan == this)
				continue;

			if (!netif_running(vxlan->dev))
				continue;

			if (vxlan->gaddr == this->gaddr)
				return true;
		}

	return false;
}

/* kernel equivalent of IP_ADD_MEMBERSHIP */
static int vxlan_join_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
	};
	int err;

	/* Already a member of group */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast join */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_join_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}

/* kernel equivalent of IP_DROP_MEMBERSHIP */
static int vxlan_leave_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	int err = 0;
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
	};

	/* Only leave group when last vxlan is done. */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast leave */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_leave_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *oip;
	struct vxlanhdr *vxh;
	struct vxlan_dev *vxlan;
	struct vxlan_stats *stats;
	__u32 vni;
	int err;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need VXLAN and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
		goto error;

	/* Drop packets with reserved bits set */
	vxh = (struct vxlanhdr *) skb->data;
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	__skb_pull(skb, sizeof(struct vxlanhdr));
	skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr));

	/* Is this VNI defined? */
	vni = ntohl(vxh->vx_vni) >> 8;
	vxlan = vxlan_find_vni(sock_net(sk), vni);
	if (!vxlan) {
		netdev_dbg(skb->dev, "unknown vni %d\n", vni);
		goto drop;
	}

	if (!pskb_may_pull(skb, ETH_HLEN)) {
		vxlan->dev->stats.rx_length_errors++;
		vxlan->dev->stats.rx_errors++;
		goto drop;
	}

	/* Re-examine inner Ethernet packet */
	oip = ip_hdr(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)
		goto drop;

	if (vxlan->learn)
		vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);

	__skb_tunnel_rx(skb, vxlan->dev);
	skb_reset_network_header(skb);

	err = IP_ECN_decapsulate(oip, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &oip->saddr, oip->tos);
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}

/* Extract dsfield from inner protocol */
static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
				   const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

/* Propagate ECN bits out */
static inline u8 vxlan_ecn_encap(u8 tos,
				 const struct iphdr *iph,
				 const struct sk_buff *skb)
{
	u8 inner = vxlan_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}

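/* Pick the tunnel endpoint for an outgoing frame: a known unicast MAC
 * maps to the remote IP learned or configured for it, while multicast
 * and unknown unicast fall back to the multicast group and are thus
 * flooded to every VTEP that joined it.
 */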
static __be32 vxlan_find_dst(struct vxlan_dev *vxlan, struct sk_buff *skb)
{
	const struct ethhdr *eth = (struct ethhdr *) skb->data;
	const struct vxlan_fdb *f;

	if (is_multicast_ether_addr(eth->h_dest))
		return vxlan->gaddr;

	f = vxlan_find_mac(vxlan, eth->h_dest);
	if (f)
		return f->remote_ip;
	else
		return vxlan->gaddr;
}

/* Transmit local packets over VXLAN
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * Source port is based on the flow hash when one is available,
 * otherwise a random value is used.
 */
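/* A minimal sketch of the resulting packet layout on the wire:
 *
 *	[ outer IP | outer UDP | VXLAN header | inner Ethernet frame ]
 *
 * i.e. the 20 + 8 + 8 bytes reserved in VXLAN_HEADROOM, plus the
 * 4 bytes there for a possible VLAN tag on the underlay device.
 */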
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt;
	const struct iphdr *old_iph;
	struct iphdr *iph;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	struct flowi4 fl4;
	unsigned int pkt_len = skb->len;
	u32 hash;
	__be32 dst;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;

	dst = vxlan_find_dst(vxlan, skb);
	if (!dst)
		goto drop;

	/* Need space for new headers (invalidates iph ptr) */
	if (skb_cow_head(skb, VXLAN_HEADROOM))
		goto drop;

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && IN_MULTICAST(ntohl(dst)))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = vxlan_get_dsfield(old_iph, skb);

	hash = skb_get_rxhash(skb);

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = vxlan->link;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.daddr = dst;
	fl4.saddr = vxlan->saddr;

	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = htonl(vxlan->vni << 8);

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = htons(vxlan_port);
	uh->source = hash ? : random32();

	uh->len = htons(skb->len);
	uh->check = 0;

	__skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_UDP;
	iph->tos = vxlan_ecn_encap(tos, old_iph, skb);
	iph->daddr = dst;
	iph->saddr = fl4.saddr;
	iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);

	/* See __IPTUNNEL_XMIT */
	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(iph, &rt->dst, NULL);

	err = ip_local_out(skb);
	if (likely(net_xmit_eval(err) == 0)) {
		struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += pkt_len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state == NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}

/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan->stats = alloc_percpu(struct vxlan_stats);
	if (!vxlan->stats)
		return -ENOMEM;

	return 0;
}

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int err;

	if (vxlan->gaddr) {
		err = vxlan_join_group(dev);
		if (err)
			return err;
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	if (vxlan->gaddr)
		vxlan_leave_group(dev);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}

/* Merge per-cpu statistics */
static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
					       struct rtnl_link_stats64 *stats)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_stats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct vxlan_stats *stats
			= per_cpu_ptr(vxlan->stats, cpu);

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);
			memcpy(&tmp, stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));

		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;

	stats->multicast = dev->stats.multicast;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_frame_errors = dev->stats.rx_frame_errors;
	stats->rx_errors = dev->stats.rx_errors;

	stats->tx_dropped = dev->stats.tx_dropped;
	stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
	stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
	stats->collisions = dev->stats.collisions;
	stats->tx_errors = dev->stats.tx_errors;

	return stats;
}

/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= vxlan_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};

static void vxlan_free(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	free_percpu(vxlan->stats);
	free_netdev(dev);
}

/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned h;

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = vxlan_free;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;

	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}

static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
};
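
/* A device using the attributes above would typically be created from
 * userspace with iproute2, e.g. (illustrative values):
 *
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1
 *
 * which sets IFLA_VXLAN_ID, IFLA_VXLAN_GROUP and IFLA_VXLAN_LINK and
 * lands in vxlan_validate()/vxlan_newlink() below.
 */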

static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_GROUP]) {
		__be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
		if (!IN_MULTICAST(ntohl(gaddr))) {
			pr_debug("group address is not IPv4 multicast\n");
			return -EADDRNOTAVAIL;
		}
	}
	return 0;
}

static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__u32 vni;
	int err;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	if (vxlan_find_vni(net, vni)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}
	vxlan->vni = vni;

	if (data[IFLA_VXLAN_GROUP])
		vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);

	if (data[IFLA_VXLAN_LOCAL])
		vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

	if (data[IFLA_VXLAN_LINK]) {
		vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]);

		if (!tb[IFLA_MTU]) {
			struct net_device *lowerdev;

			/* Guard against a bogus ifindex before deriving
			 * the MTU from the lower device.
			 */
			lowerdev = __dev_get_by_index(net, vxlan->link);
			if (!lowerdev) {
				pr_info("ifindex %d does not exist\n",
					vxlan->link);
				return -ENODEV;
			}
			dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
		}
	}

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->learn = true;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	err = register_netdevice(dev);
	if (!err)
		hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));

	return err;
}

static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	hlist_del_rcu(&vxlan->hlist);

	unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_LOCAL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		0;
}

static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
		goto nla_put_failure;

	if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
		goto nla_put_failure;

	if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
		goto nla_put_failure;

	if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING, vxlan->learn) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};

static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct sock *sk;
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int rc;
	unsigned h;

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
	if (rc < 0) {
		pr_debug("UDP socket create failed\n");
		return rc;
	}
	/* Put in proper namespace */
	sk = vn->sock->sk;
	sk_change_net(sk, net);

	vxlan_addr.sin_port = htons(vxlan_port);

	rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
	if (rc < 0) {
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		sk_release_kernel(sk);
		vn->sock = NULL;
		return rc;
	}

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;

	/* Mark socket as an encapsulation socket. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
	udp_encap_enable();

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->vni_list[h]);

	return 0;
}

static __net_exit void vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	if (vn->sock) {
		sk_release_kernel(vn->sock->sk);
		vn->sock = NULL;
	}
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

static int __init vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_device(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out2;

	return 0;

out2:
	unregister_pernet_device(&vxlan_net_ops);
out1:
	return rc;
}
module_init(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
MODULE_ALIAS_RTNL_LINK("vxlan");