/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * TODO
 *  - use IANA UDP port number (when defined)
 *  - IPv6 (not in RFC)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define VXLAN_VERSION	"0.1"

#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */

/* VXLAN protocol header */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};
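/* Wire-format note: vx_flags must carry only the I flag (VXLAN_FLAGS,
 * 0x08000000 in host order); all other flag bits are reserved.  The 24-bit
 * VNI occupies the upper three bytes of vx_vni, so transmit shifts the VNI
 * left by 8 and receive drops packets whose low (reserved) byte is set.
 */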

/* UDP port for VXLAN traffic. */
static unsigned int vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, uint, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
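/* Editor's note: 8472 predates the IANA assignment (see the TODO above);
 * IANA has since allocated UDP port 4789 for VXLAN.  The parameter is
 * read-only (0444), so it can only be changed at module load time.
 */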

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* per-net private data for this module */
static unsigned int vxlan_net_id;
struct vxlan_net {
	struct socket	  *sock;	/* UDP encap socket */
	struct hlist_head vni_list[VNI_HASH_SIZE];
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	__be32		  remote_ip;
	u16		  state;	/* see ndm_state */
	u8		  eth_addr[ETH_ALEN];
};

/* Per-cpu network traffic stats */
struct vxlan_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};
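/* The syncp seqcount lets vxlan_stats64() read the four 64-bit counters
 * consistently on 32-bit SMP; on 64-bit builds the begin/retry pair
 * compiles away to nothing.
 */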

/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;
	struct net_device *dev;
	struct vxlan_stats __percpu *stats;
	__u32		  vni;		/* virtual network id */
	__be32		  gaddr;	/* multicast group */
	__be32		  saddr;	/* source address */
	unsigned int	  link;		/* link to multicast over */
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* below */

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};

#define VXLAN_F_LEARN	0x01
#define VXLAN_F_PROXY	0x02
#define VXLAN_F_RSC	0x04
#define VXLAN_F_L2MISS	0x08
#define VXLAN_F_L3MISS	0x10
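/* Flag semantics (set from the IFLA_VXLAN_* netlink attributes below):
 * LEARN  - snoop source MAC/IP on receive to populate the FDB,
 * PROXY  - answer ARP requests locally from the FDB (arp_reduce),
 * RSC    - route short-circuit outgoing IP packets (route_shortcircuit),
 * L2MISS - send a netlink notification on unknown destination MAC,
 * L3MISS - send a netlink notification on unresolved destination IP.
 */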

/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline struct hlist_head *vni_head(struct net *net, u32 id)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
{
	struct vxlan_dev *vxlan;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
		if (vxlan->vni == id)
			return vxlan;
	}

	return NULL;
}
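/* vxlan_find_vni() walks its hash chain with hlist_for_each_entry_rcu(),
 * so callers must be in an RCU read-side critical section (the UDP encap
 * receive path qualifies) or hold RTNL while the chain is stable.
 */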

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = fdb->remote_ip != 0;
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = NDA_DST;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && nla_put_be32(skb, NDA_DST, fdb->remote_ip))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(__be32)) /* NDA_DST */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     const struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f;

	memset(&f, 0, sizeof f);
	f.state = NUD_STALE;
	f.remote_ip = ipa; /* goes to NDA_DST */

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f;

	memset(&f, 0, sizeof f);
	f.state = NUD_STALE;
	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}
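/* eth_hash() reads 8 bytes but only the first 6 belong to the address;
 * the shift above discards the 2 stray trailing bytes in either byte
 * order before hash_64() folds the value down to FDB_HASH_BITS.
 */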

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)

{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(f, node, head, hlist) {
		if (compare_ether_addr(mac, f->eth_addr) == 0)
			return f;
	}

	return NULL;
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, __be32 ip,
			    __u16 state, __u16 flags)
{
	struct vxlan_fdb *f;
	int notify = 0;

	f = vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->remote_ip = ip;
		f->state = state;
		f->updated = f->used = jiffies;
		memcpy(f->eth_addr, mac, ETH_ALEN);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify)
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

	return 0;
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	kfree_rcu(f, rcu);
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__be32 ip;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	if (nla_len(tb[NDA_DST]) != sizeof(__be32))
		return -EAFNOSUPPORT;

	ip = nla_get_be32(tb[NDA_DST]);

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (f) {
		vxlan_fdb_destroy(vxlan, f);
		err = 0;
	}
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		struct hlist_node *n;
		int err;

		hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
			if (idx < cb->args[0])
				goto skip;

			err = vxlan_fdb_info(skb, vxlan, f,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH,
					     NLM_F_MULTI);
			if (err < 0)
				break;
skip:
			++idx;
		}
	}

	return idx;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 */
static void vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		f->used = jiffies;
		if (likely(f->remote_ip == src_ip))
			return;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pI4 to %pI4\n",
				    src_mac, &f->remote_ip, &src_ip);

		f->remote_ip = src_ip;
		f->updated = jiffies;
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);
		err = vxlan_fdb_create(vxlan, src_mac, src_ip,
				       NUD_REACHABLE,
				       NLM_F_EXCL|NLM_F_CREATE);
		spin_unlock(&vxlan->hash_lock);
	}
}


/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn,
			     const struct vxlan_dev *this)
{
	const struct vxlan_dev *vxlan;
	struct hlist_node *node;
	unsigned h;

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
			if (vxlan == this)
				continue;

			if (!netif_running(vxlan->dev))
				continue;

			if (vxlan->gaddr == this->gaddr)
				return true;
		}

	return false;
}
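/* vxlan_group_used() iterates without RCU because its only callers,
 * vxlan_join_group() and vxlan_leave_group(), run from ndo_open/ndo_stop
 * under RTNL, which also serializes additions to vni_list.
 */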

/* kernel equivalent to IP_ADD_MEMBERSHIP */
static int vxlan_join_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
	};
	int err;

	/* Already a member of group */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast join */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_join_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}


/* kernel equivalent to IP_DROP_MEMBERSHIP */
static int vxlan_leave_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	int err = 0;
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
	};

	/* Only leave group when last vxlan is done. */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast leave */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_leave_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}

/* Callback from net/ipv4/udp.c to receive packets */
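/* Return convention for encap_rcv: 0 means the skb was consumed here
 * (delivered or dropped); a positive return hands the packet, with its
 * UDP header restored, back to the normal UDP receive path.
 */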
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *oip;
	struct vxlanhdr *vxh;
	struct vxlan_dev *vxlan;
	struct vxlan_stats *stats;
	__u32 vni;
	int err;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need VXLAN and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
		goto error;

	/* Drop packets with reserved bits set */
	vxh = (struct vxlanhdr *) skb->data;
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	__skb_pull(skb, sizeof(struct vxlanhdr));

	/* Is this VNI defined? */
	vni = ntohl(vxh->vx_vni) >> 8;
	vxlan = vxlan_find_vni(sock_net(sk), vni);
	if (!vxlan) {
		netdev_dbg(skb->dev, "unknown vni %d\n", vni);
		goto drop;
	}

	if (!pskb_may_pull(skb, ETH_HLEN)) {
		vxlan->dev->stats.rx_length_errors++;
		vxlan->dev->stats.rx_errors++;
		goto drop;
	}

	skb_reset_mac_header(skb);

	/* Re-examine inner Ethernet packet */
	oip = ip_hdr(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)
		goto drop;

	if (vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);

	__skb_tunnel_rx(skb, vxlan->dev);
	skb_reset_network_header(skb);

	/* If the NIC driver gave us an encapsulated packet with
	 * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
	 * leave the CHECKSUM_UNNECESSARY, the device checksummed it
	 * for us. Otherwise force the upper layers to verify it.
	 */
	if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
	    !(vxlan->dev->features & NETIF_F_RXCSUM))
		skb->ip_summed = CHECKSUM_NONE;

	skb->encapsulation = 0;

	err = IP_ECN_decapsulate(oip, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &oip->saddr, oip->tos);
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}

static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_dev *vxlan = netdev_priv(dev);
		struct vxlan_fdb *f;
		struct sk_buff	*reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && f->remote_ip == 0) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				n->ha, sha);

		neigh_release(n);

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, tip);
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}

static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;
	struct iphdr *pip;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		break;
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
				dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, pip->daddr);
	return false;
}

/* Extract dsfield from inner protocol */
static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
				   const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

/* Propagate ECN bits out */
static inline u8 vxlan_ecn_encap(u8 tos,
				 const struct iphdr *iph,
				 const struct sk_buff *skb)
{
	u8 inner = vxlan_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}
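/* INET_ECN_encapsulate() combines the outer DSCP with the inner ECN field
 * per RFC 6040; IP_ECN_decapsulate() on the receive side undoes it and
 * returns > 1 when the outer header signals CE but the inner packet is
 * not ECN-capable, which the receive path above treats as a drop.
 */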

static void vxlan_sock_free(struct sk_buff *skb)
{
	sock_put(skb->sk);
}

/* On transmit, associate with the tunnel socket */
static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;

	skb_orphan(skb);
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = vxlan_sock_free;
}

/* Compute source port for outgoing packet.
 * First choice is the L4 flow hash, since it spreads better and may be
 * available from hardware; the fallback is a jhash of the Ethernet header.
 */
static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
{
	unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
	u32 hash;

	hash = skb_get_rxhash(skb);
	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);

	return (((u64) hash * range) >> 32) + vxlan->port_min;
}
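/* (hash * range) >> 32 maps the 32-bit hash onto [0, range) without a
 * modulo: the 64-bit product is at most (2^32 - 1) * range, so its upper
 * 32 bits form an evenly scaled index into the configured source-port
 * range.
 */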

/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * The source port is based on a hash of the flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt;
	const struct iphdr *old_iph;
	struct ethhdr *eth;
	struct iphdr *iph;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	struct flowi4 fl4;
	unsigned int pkt_len = skb->len;
	__be32 dst;
	__u16 src_port;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;
	bool did_rsc = false;
	const struct vxlan_fdb *f;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
		return arp_reduce(dev, skb);
	else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
		did_rsc = route_shortcircuit(dev, skb);

	f = vxlan_find_mac(vxlan, eth->h_dest);
	if (f == NULL) {
		did_rsc = false;
		dst = vxlan->gaddr;
		if (!dst && (vxlan->flags & VXLAN_F_L2MISS) &&
		    !is_multicast_ether_addr(eth->h_dest))
			vxlan_fdb_miss(vxlan, eth->h_dest);
	} else
		dst = f->remote_ip;

	if (!dst) {
		if (did_rsc) {
			__skb_pull(skb, skb_network_offset(skb));
			skb->ip_summed = CHECKSUM_NONE;
			skb->pkt_type = PACKET_HOST;

			/* short-circuited back to local bridge */
			if (netif_rx(skb) == NET_RX_SUCCESS) {
				struct vxlan_stats *stats =
						this_cpu_ptr(vxlan->stats);

				u64_stats_update_begin(&stats->syncp);
				stats->tx_packets++;
				stats->tx_bytes += pkt_len;
				u64_stats_update_end(&stats->syncp);
			} else {
				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;
			}
			return NETDEV_TX_OK;
		}
		goto drop;
	}

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	/* Need space for new headers (invalidates iph ptr) */
	if (skb_cow_head(skb, VXLAN_HEADROOM))
		goto drop;

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && IN_MULTICAST(ntohl(dst)))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = vxlan_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan, skb);

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = vxlan->link;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.daddr = dst;
	fl4.saddr = vxlan->saddr;

	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = htonl(vxlan->vni << 8);

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = htons(vxlan_port);
	uh->source = htons(src_port);

	uh->len = htons(skb->len);
	uh->check = 0;

	__skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version	= 4;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= df;
	iph->protocol	= IPPROTO_UDP;
	iph->tos	= vxlan_ecn_encap(tos, old_iph, skb);
	iph->daddr	= dst;
	iph->saddr	= fl4.saddr;
	iph->ttl	= ttl ? : ip4_dst_hoplimit(&rt->dst);

	vxlan_set_owner(dev, skb);

	/* See iptunnel_xmit() */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(iph, &rt->dst, NULL);

	err = ip_local_out(skb);
	if (likely(net_xmit_eval(err) == 0)) {
		struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += pkt_len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}

/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan->stats = alloc_percpu(struct vxlan_stats);
	if (!vxlan->stats)
		return -ENOMEM;

	return 0;
}

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int err;

	if (vxlan->gaddr) {
		err = vxlan_join_group(dev);
		if (err)
			return err;
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	if (vxlan->gaddr)
		vxlan_leave_group(dev);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}

/* Merge per-cpu statistics */
static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
					       struct rtnl_link_stats64 *stats)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_stats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct vxlan_stats *stats
			= per_cpu_ptr(vxlan->stats, cpu);

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);
			memcpy(&tmp, stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));

		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;

	stats->multicast = dev->stats.multicast;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_frame_errors = dev->stats.rx_frame_errors;
	stats->rx_errors = dev->stats.rx_errors;

	stats->tx_dropped = dev->stats.tx_dropped;
	stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
	stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
	stats->collisions = dev->stats.collisions;
	stats->tx_errors = dev->stats.tx_errors;

	return stats;
}

/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= vxlan_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};

static void vxlan_free(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	free_percpu(vxlan->stats);
	free_netdev(dev);
}

/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned h;
	int low, high;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = vxlan_free;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->priv_flags	&= ~IFF_XMIT_DST_RELEASE;

	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	inet_get_local_port_range(&low, &high);
	vxlan->port_min = low;
	vxlan->port_max = high;

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}

static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
};

static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_GROUP]) {
		__be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
		if (!IN_MULTICAST(ntohl(gaddr))) {
			pr_debug("group address is not IPv4 multicast\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}

static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__u32 vni;
	int err;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	if (vxlan_find_vni(net, vni)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}
	vxlan->vni = vni;

	if (data[IFLA_VXLAN_GROUP])
		vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);

	if (data[IFLA_VXLAN_LOCAL])
		vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

	if (data[IFLA_VXLAN_LINK] &&
	    (vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			= __dev_get_by_index(net, vxlan->link);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", vxlan->link);
			return -ENODEV;
		}

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;

		/* update header length based on lower device */
		dev->hard_header_len = lowerdev->hard_header_len +
				       VXLAN_HEADROOM;
	}

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	err = register_netdevice(dev);
	if (!err)
		hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));

	return err;
}

static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	hlist_del_rcu(&vxlan->hlist);

	unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{

	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(__be32))+	/* IFLA_VXLAN_LOCAL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		0;
}

static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
		goto nla_put_failure;

	if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
		goto nla_put_failure;

	if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
		goto nla_put_failure;

	if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};

static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct sock *sk;
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int rc;
	unsigned h;

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
	if (rc < 0) {
		pr_debug("UDP socket create failed\n");
		return rc;
	}
	/* Put in proper namespace */
	sk = vn->sock->sk;
	sk_change_net(sk, net);

	vxlan_addr.sin_port = htons(vxlan_port);

	rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
	if (rc < 0) {
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		sk_release_kernel(sk);
		vn->sock = NULL;
		return rc;
	}

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;

	/* Mark socket as an encapsulation socket. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
	udp_encap_enable();

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->vni_list[h]);

	return 0;
}

static __net_exit void vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	if (vn->sock) {
		sk_release_kernel(vn->sock->sk);
		vn->sock = NULL;
	}
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

static int __init vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_device(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out2;

	return 0;

out2:
	unregister_pernet_device(&vxlan_net_ops);
out1:
	return rc;
}
module_init(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
MODULE_ALIAS_RTNL_LINK("vxlan");