/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * TODO
 *  - use IANA UDP port number (when defined)
 *  - IPv6 (not in RFC)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define VXLAN_VERSION	"0.1"

#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */

/* VXLAN protocol header */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};
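
/* On the wire a VXLAN frame is:
 *   outer Ethernet / outer IP / outer UDP / vxlanhdr / inner Ethernet frame
 * vx_flags carries the mandatory I bit (0x08000000, VXLAN_FLAGS above) and
 * vx_vni holds the 24-bit VNI in its upper three bytes, so for example a
 * VNI of 42 is encoded as htonl(42 << 8).  The low byte of vx_vni is
 * reserved and must be zero; the receive path drops frames that set it.
 */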

/* UDP port for VXLAN traffic. */
static unsigned int vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, uint, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
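
/* The parameter is read-only at runtime (mode 0444), so the port must be
 * chosen at load time; an illustrative invocation:
 *	modprobe vxlan udp_port=4789
 * would listen on the IANA-assigned VXLAN port instead of the historical
 * Linux default of 8472 used above.
 */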

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* per-net private data for this module */
static unsigned int vxlan_net_id;
struct vxlan_net {
	struct socket	  *sock;	/* UDP encap socket */
	struct hlist_head vni_list[VNI_HASH_SIZE];
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	__be32		  remote_ip;
	u16		  state;	/* see ndm_state */
	u8		  eth_addr[ETH_ALEN];
};

/* Per-cpu network traffic stats */
struct vxlan_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};

/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;
	struct net_device *dev;
	struct vxlan_stats __percpu *stats;
	__u32		  vni;		/* virtual network id */
	__be32		  gaddr;	/* multicast group */
	__be32		  saddr;	/* source address */
	unsigned int	  link;		/* link to multicast over */
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* below */

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};

#define VXLAN_F_LEARN	0x01
#define VXLAN_F_PROXY	0x02
#define VXLAN_F_RSC	0x04
#define VXLAN_F_L2MISS	0x08
#define VXLAN_F_L3MISS	0x10

/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline struct hlist_head *vni_head(struct net *net, u32 id)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
{
	struct vxlan_dev *vxlan;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
		if (vxlan->vni == id)
			return vxlan;
	}

	return NULL;
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = fdb->remote_ip != 0;
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = NDA_DST;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && nla_put_be32(skb, NDA_DST, fdb->remote_ip))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(__be32)) /* NDA_DST */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     const struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f;

	memset(&f, 0, sizeof f);
	f.state = NUD_STALE;
	f.remote_ip = ipa; /* goes to NDA_DST */

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f;

	memset(&f, 0, sizeof f);
	f.state = NUD_STALE;
	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}
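
/* A worked example of the fold above: get_unaligned() reads 8 bytes
 * starting at the MAC address, so two bytes of unrelated memory come
 * along for the ride.  On little-endian, value <<= 16 pushes those two
 * stray high-order bytes out of the word; on big-endian, value >>= 16
 * drops the trailing ones.  Either way only the 6 MAC bytes feed
 * hash_64(), which returns a bucket index in [0, FDB_HASH_SIZE).
 */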

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(f, node, head, hlist) {
		if (compare_ether_addr(mac, f->eth_addr) == 0)
			return f;
	}

	return NULL;
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, __be32 ip,
			    __u16 state, __u16 flags)
{
	struct vxlan_fdb *f;
	int notify = 0;

	f = vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->remote_ip = ip;
		f->state = state;
		f->updated = f->used = jiffies;
		memcpy(f->eth_addr, mac, ETH_ALEN);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify)
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

	return 0;
}
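
/* The NLM_F_* flags mirror rtnetlink semantics: NLM_F_CREATE permits
 * inserting a missing entry, while NLM_F_EXCL refuses to touch an
 * existing one.  vxlan_snoop() below passes NLM_F_EXCL|NLM_F_CREATE,
 * so a learned entry is only ever added here, never clobbered.
 */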

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	kfree_rcu(f, rcu);
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__be32 ip;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	if (nla_len(tb[NDA_DST]) != sizeof(__be32))
		return -EAFNOSUPPORT;

	ip = nla_get_be32(tb[NDA_DST]);

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (f) {
		vxlan_fdb_destroy(vxlan, f);
		err = 0;
	}
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		struct hlist_node *n;
		int err;

		hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
			if (idx < cb->args[0])
				goto skip;

			err = vxlan_fdb_info(skb, vxlan, f,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH,
					     NLM_F_MULTI);
			if (err < 0)
				break;
skip:
			++idx;
		}
	}

	return idx;
}
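
/* These three ndo_fdb_* hooks back the generic bridge netlink API, so
 * the table can be seeded and inspected from userspace with iproute2;
 * illustrative commands:
 *	bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0
 *	bridge fdb show dev vxlan0
 */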

/* Watch incoming packets to learn mapping between Ethernet address
 * and tunnel endpoint.
 */
static void vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		f->used = jiffies;
		if (likely(f->remote_ip == src_ip))
			return;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pI4 to %pI4\n",
				    src_mac, &f->remote_ip, &src_ip);

		f->remote_ip = src_ip;
		f->updated = jiffies;
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);
		err = vxlan_fdb_create(vxlan, src_mac, src_ip,
				       NUD_REACHABLE,
				       NLM_F_EXCL|NLM_F_CREATE);
		spin_unlock(&vxlan->hash_lock);
	}
}


/* See if multicast group is already in use by another ID */
static bool vxlan_group_used(struct vxlan_net *vn,
			     const struct vxlan_dev *this)
{
	const struct vxlan_dev *vxlan;
	struct hlist_node *node;
	unsigned h;

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
			if (vxlan == this)
				continue;

			if (!netif_running(vxlan->dev))
				continue;

			if (vxlan->gaddr == this->gaddr)
				return true;
		}

	return false;
}

/* kernel equivalent to IP_ADD_MEMBERSHIP */
static int vxlan_join_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
		.imr_ifindex = vxlan->link,
	};
	int err;

	/* Already a member of group */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast join */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_join_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}


/* kernel equivalent to IP_DROP_MEMBERSHIP */
static int vxlan_leave_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	int err = 0;
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
		.imr_ifindex = vxlan->link,
	};

	/* Only leave group when last vxlan is done. */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast leave */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_leave_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *oip;
	struct vxlanhdr *vxh;
	struct vxlan_dev *vxlan;
	struct vxlan_stats *stats;
	__u32 vni;
	int err;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need VXLAN and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
		goto error;

	/* Drop packets with reserved bits set */
	vxh = (struct vxlanhdr *) skb->data;
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	__skb_pull(skb, sizeof(struct vxlanhdr));

	/* Is this VNI defined? */
	vni = ntohl(vxh->vx_vni) >> 8;
	vxlan = vxlan_find_vni(sock_net(sk), vni);
	if (!vxlan) {
		netdev_dbg(skb->dev, "unknown vni %d\n", vni);
		goto drop;
	}

	if (!pskb_may_pull(skb, ETH_HLEN)) {
		vxlan->dev->stats.rx_length_errors++;
		vxlan->dev->stats.rx_errors++;
		goto drop;
	}

	skb_reset_mac_header(skb);

	/* Re-examine inner Ethernet packet */
	oip = ip_hdr(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)
		goto drop;

	if (vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);

	__skb_tunnel_rx(skb, vxlan->dev);
	skb_reset_network_header(skb);

	/* If the NIC driver gave us an encapsulated packet with
	 * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
	 * leave the CHECKSUM_UNNECESSARY, the device checksummed it
	 * for us. Otherwise force the upper layers to verify it.
	 */
	if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
	    !(vxlan->dev->features & NETIF_F_RXCSUM))
		skb->ip_summed = CHECKSUM_NONE;

	skb->encapsulation = 0;

	err = IP_ECN_decapsulate(oip, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &oip->saddr, oip->tos);
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
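
/* Return convention for udp encap_rcv callbacks: 0 means the skb was
 * consumed here (delivered or dropped), while a positive return hands
 * the packet back to the normal UDP receive path -- which is why the
 * error path above restores the UDP header before returning 1.
 */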

static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_dev *vxlan = netdev_priv(dev);
		struct vxlan_fdb *f;
		struct sk_buff	*reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && f->remote_ip == 0) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				n->ha, sha);

		neigh_release(n);

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, tip);
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
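
/* In effect this is proxy ARP: when the requested IP is already in the
 * neighbour cache, a reply is forged locally (arp_create() above) and
 * fed back in with netif_rx_ni(), so no ARP broadcast ever crosses the
 * tunnel.  Unresolvable requests fall through to an L3 miss
 * notification when VXLAN_F_L3MISS is set.
 */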

static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;
	struct iphdr *pip;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		break;
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
				dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, pip->daddr);
	return false;
}

/* Extract dsfield from inner protocol */
static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
				   const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

/* Propagate ECN bits out */
static inline u8 vxlan_ecn_encap(u8 tos,
				 const struct iphdr *iph,
				 const struct sk_buff *skb)
{
	u8 inner = vxlan_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}

static void vxlan_sock_free(struct sk_buff *skb)
{
	sock_put(skb->sk);
}

/* On transmit, associate with the tunnel socket */
static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;

	skb_orphan(skb);
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = vxlan_sock_free;
}

/* Compute source port for outgoing packet.
 * First choice is the L4 flow hash, since it spreads better
 * and may be available from hardware.
 * Second choice is a jhash on the Ethernet header.
 */
static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
{
	unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
	u32 hash;

	hash = skb_get_rxhash(skb);
	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);

	return (((u64) hash * range) >> 32) + vxlan->port_min;
}
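
/* The multiply-shift maps the 32-bit hash uniformly onto the range
 * without a modulo.  For example, with port_min = 32768 and
 * port_max = 61000 the range is 28233, and hash = 0x80000000 yields
 * (0x80000000 * 28233) >> 32 = 14116, i.e. source port 46884.
 */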

/* Transmit local packets over VXLAN
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * Source port is based on a hash of the flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt;
	const struct iphdr *old_iph;
	struct ethhdr *eth;
	struct iphdr *iph;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	struct flowi4 fl4;
	unsigned int pkt_len = skb->len;
	__be32 dst;
	__u16 src_port;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;
	bool did_rsc = false;
	const struct vxlan_fdb *f;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
		return arp_reduce(dev, skb);
	else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
		did_rsc = route_shortcircuit(dev, skb);

	f = vxlan_find_mac(vxlan, eth->h_dest);
	if (f == NULL) {
		did_rsc = false;
		dst = vxlan->gaddr;
		if (!dst && (vxlan->flags & VXLAN_F_L2MISS) &&
		    !is_multicast_ether_addr(eth->h_dest))
			vxlan_fdb_miss(vxlan, eth->h_dest);
	} else
		dst = f->remote_ip;

	if (!dst) {
		if (did_rsc) {
			__skb_pull(skb, skb_network_offset(skb));
			skb->ip_summed = CHECKSUM_NONE;
			skb->pkt_type = PACKET_HOST;

			/* short-circuited back to local bridge */
			if (netif_rx(skb) == NET_RX_SUCCESS) {
				struct vxlan_stats *stats =
					this_cpu_ptr(vxlan->stats);

				u64_stats_update_begin(&stats->syncp);
				stats->tx_packets++;
				stats->tx_bytes += pkt_len;
				u64_stats_update_end(&stats->syncp);
			} else {
				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;
			}
			return NETDEV_TX_OK;
		}
		goto drop;
	}

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	/* Need space for new headers (invalidates iph ptr) */
	if (skb_cow_head(skb, VXLAN_HEADROOM))
		goto drop;

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && IN_MULTICAST(ntohl(dst)))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = vxlan_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan, skb);

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = vxlan->link;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.daddr = dst;
	fl4.saddr = vxlan->saddr;

	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = htonl(vxlan->vni << 8);

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = htons(vxlan_port);
	uh->source = htons(src_port);

	uh->len = htons(skb->len);
	uh->check = 0;

	__skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_UDP;
	iph->tos = vxlan_ecn_encap(tos, old_iph, skb);
	iph->daddr = dst;
	iph->saddr = fl4.saddr;
	iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);

	vxlan_set_owner(dev, skb);

	/* See iptunnel_xmit() */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(iph, &rt->dst, NULL);

	err = ip_local_out(skb);
	if (likely(net_xmit_eval(err) == 0)) {
		struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += pkt_len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}

/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan->stats = alloc_percpu(struct vxlan_stats);
	if (!vxlan->stats)
		return -ENOMEM;

	return 0;
}

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int err;

	if (vxlan->gaddr) {
		err = vxlan_join_group(dev);
		if (err)
			return err;
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	if (vxlan->gaddr)
		vxlan_leave_group(dev);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}

/* Merge per-cpu statistics */
static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
					       struct rtnl_link_stats64 *stats)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_stats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct vxlan_stats *stats
			= per_cpu_ptr(vxlan->stats, cpu);

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);
			memcpy(&tmp, stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));

		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;

	stats->multicast = dev->stats.multicast;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_frame_errors = dev->stats.rx_frame_errors;
	stats->rx_errors = dev->stats.rx_errors;

	stats->tx_dropped = dev->stats.tx_dropped;
	stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
	stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
	stats->collisions = dev->stats.collisions;
	stats->tx_errors = dev->stats.tx_errors;

	return stats;
}
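
/* The begin/retry pair above is a seqcount read: if a writer on that
 * cpu updated the counters while we copied them, the fetch is retried,
 * so a consistent 64-bit snapshot is obtained even on 32-bit machines
 * where the update itself is not atomic.
 */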

/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= vxlan_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};

static void vxlan_free(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	free_percpu(vxlan->stats);
	free_netdev(dev);
}

/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned h;
	int low, high;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = vxlan_free;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->priv_flags	&= ~IFF_XMIT_DST_RELEASE;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	inet_get_local_port_range(&low, &high);
	vxlan->port_min = low;
	vxlan->port_max = high;

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}

static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len  = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
};
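
/* A device is normally created via iproute2; an illustrative example
 * exercising the attributes above:
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 \
 *		dev eth0 ttl 16 ageing 300
 *	ip link set vxlan0 up
 */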

static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_GROUP]) {
		__be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
		if (!IN_MULTICAST(ntohl(gaddr))) {
			pr_debug("group address is not IPv4 multicast\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}

static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__u32 vni;
	int err;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	if (vxlan_find_vni(net, vni)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}
	vxlan->vni = vni;

	if (data[IFLA_VXLAN_GROUP])
		vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);

	if (data[IFLA_VXLAN_LOCAL])
		vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

	if (data[IFLA_VXLAN_LINK] &&
	    (vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			 = __dev_get_by_index(net, vxlan->link);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", vxlan->link);
			return -ENODEV;
		}

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;

		/* update header length based on lower device */
		dev->hard_header_len = lowerdev->hard_header_len +
				       VXLAN_HEADROOM;
	}

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);

	err = register_netdevice(dev);
	if (!err)
		hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));

	return err;
}

static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	hlist_del_rcu(&vxlan->hlist);

	unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{

	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(__be32))+	/* IFLA_VXLAN_LOCAL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		0;
}

static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
		goto nla_put_failure;

	if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
		goto nla_put_failure;

	if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
		goto nla_put_failure;

	if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
			!!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
			!!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
			!!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
			!!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};

static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct sock *sk;
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int rc;
	unsigned h;

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
	if (rc < 0) {
		pr_debug("UDP socket create failed\n");
		return rc;
	}
	/* Put in proper namespace */
	sk = vn->sock->sk;
	sk_change_net(sk, net);

	vxlan_addr.sin_port = htons(vxlan_port);

	rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
	if (rc < 0) {
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		sk_release_kernel(sk);
		vn->sock = NULL;
		return rc;
	}

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;

	/* Mark socket as an encapsulation socket. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
	udp_encap_enable();

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->vni_list[h]);

	return 0;
}

static __net_exit void vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	if (vn->sock) {
		sk_release_kernel(vn->sock->sk);
		vn->sock = NULL;
	}
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

static int __init vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_device(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out2;

	return 0;

out2:
	unregister_pernet_device(&vxlan_net_ops);
out1:
	return rc;
}
module_init(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
MODULE_ALIAS_RTNL_LINK("vxlan");