/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * TODO
 *  - use IANA UDP port number (when defined)
 *  - IPv6 (not in RFC)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/ipip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define VXLAN_VERSION	"0.1"

#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL	(10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)

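/* Encapsulation overhead sketch (added commentary, derived from the
 * arithmetic above): the reserved headroom covers everything this driver
 * itself pushes in front of the inner frame; the outer Ethernet header
 * is accounted separately via dev->hard_header_len in vxlan_setup().
 *
 *   | outer IPv4 (20) | UDP (8) | VXLAN (8) | inner Ethernet (14) | payload ...
 */
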
#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */

/* VXLAN protocol header */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};
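
/* Wire format of the 8-byte header above (sketch, per the VXLAN draft):
 * only the I flag is set in vx_flags -- 0x08000000 once in network byte
 * order -- and the 24-bit VNI occupies the upper bits of vx_vni; all
 * other bits are reserved and are checked to be zero on receive.
 *
 *	|R|R|R|R|I|R|R|R|         reserved (24 bits)          |
 *	|             VNI (24 bits)            | reserved (8) |
 */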

/* UDP port for VXLAN traffic. */
static unsigned int vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, uint, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
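/* Note (added): 8472 is the historical Linux default and predates the
 * IANA assignment; IANA has since allocated UDP port 4789 for VXLAN
 * (see the TODO at the top of this file), so deployments interoperating
 * with other implementations may want to load with udp_port=4789.
 */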

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* per-net private data for this module */
static unsigned int vxlan_net_id;
struct vxlan_net {
	struct socket	  *sock;	/* UDP encap socket */
	struct hlist_head vni_list[VNI_HASH_SIZE];
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	__be32		  remote_ip;
	u16		  state;	/* see ndm_state */
	u8		  eth_addr[ETH_ALEN];
};

/* Per-cpu network traffic stats */
struct vxlan_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};
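
/* Implementation note (added): the u64_stats_sync seqcount lets 32-bit
 * hosts read these 64-bit counters without tearing; writers bracket
 * updates with u64_stats_update_begin()/_end() and readers retry via
 * u64_stats_fetch_begin_bh()/_retry_bh(), as in vxlan_stats64() below.
 */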

/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;
	struct net_device *dev;
	struct vxlan_stats __percpu *stats;
	__u32		  vni;		/* virtual network id */
	__be32		  gaddr;	/* multicast group */
	__be32		  saddr;	/* source address */
	unsigned int	  link;		/* link to multicast over */
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* below */

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};

#define VXLAN_F_LEARN	0x01
#define VXLAN_F_PROXY	0x02
#define VXLAN_F_RSC	0x04
#define VXLAN_F_L2MISS	0x08
#define VXLAN_F_L3MISS	0x10

/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline struct hlist_head *vni_head(struct net *net, u32 id)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
{
	struct vxlan_dev *vxlan;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
		if (vxlan->vni == id)
			return vxlan;
	}

	return NULL;
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = fdb->remote_ip != 0;
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = NDA_DST;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && nla_put_be32(skb, NDA_DST, fdb->remote_ip))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(__be32)) /* NDA_DST */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     const struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
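
/* Note (added): these notifications are delivered to the RTNLGRP_NEIGH
 * multicast group, so userspace can follow learn/age/miss events live,
 * e.g. with iproute2's "bridge monitor fdb".
 */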

static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f;

	memset(&f, 0, sizeof f);
	f.state = NUD_STALE;
	f.remote_ip = ipa; /* goes to NDA_DST */

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f;

	memset(&f, 0, sizeof f);
	f.state = NUD_STALE;
	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes: the 64-bit load picked up two bytes past the
	 * MAC address, so shift them out (they are the low 16 bits on
	 * big-endian, the high 16 bits on little-endian)
	 */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)

{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(f, node, head, hlist) {
		if (compare_ether_addr(mac, f->eth_addr) == 0)
			return f;
	}

	return NULL;
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, __be32 ip,
			    __u16 state, __u16 flags)
{
	struct vxlan_fdb *f;
	int notify = 0;

	f = vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->remote_ip = ip;
		f->state = state;
		f->updated = f->used = jiffies;
		memcpy(f->eth_addr, mac, ETH_ALEN);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify)
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

	return 0;
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	kfree_rcu(f, rcu);
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__be32 ip;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	if (nla_len(tb[NDA_DST]) != sizeof(__be32))
		return -EAFNOSUPPORT;

	ip = nla_get_be32(tb[NDA_DST]);

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
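
/* Usage sketch (added commentary): static entries arrive via the
 * NTF_SELF bridge netlink path, so a unicast MAC can be pinned to a
 * remote tunnel endpoint from userspace with something like:
 *
 *	bridge fdb add 00:11:22:33:44:55 dev vxlan0 dst 192.168.1.10
 *
 * which reaches this handler as RTM_NEWNEIGH with NDA_DST carrying the
 * remote IPv4 address.
 */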

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (f) {
		vxlan_fdb_destroy(vxlan, f);
		err = 0;
	}
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		struct hlist_node *n;
		int err;

		hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
			if (idx < cb->args[0])
				goto skip;

			err = vxlan_fdb_info(skb, vxlan, f,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH,
					     NLM_F_MULTI);
			if (err < 0)
				break;
skip:
			++idx;
		}
	}

	return idx;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and tunnel endpoint.
 */
static void vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		f->used = jiffies;
		if (likely(f->remote_ip == src_ip))
			return;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pI4 to %pI4\n",
				    src_mac, &f->remote_ip, &src_ip);

		f->remote_ip = src_ip;
		f->updated = jiffies;
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);
		err = vxlan_fdb_create(vxlan, src_mac, src_ip,
				       NUD_REACHABLE,
				       NLM_F_EXCL|NLM_F_CREATE);
		spin_unlock(&vxlan->hash_lock);
	}
}


/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn,
			     const struct vxlan_dev *this)
{
	const struct vxlan_dev *vxlan;
	struct hlist_node *node;
	unsigned h;

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
			if (vxlan == this)
				continue;

			if (!netif_running(vxlan->dev))
				continue;

			if (vxlan->gaddr == this->gaddr)
				return true;
		}

	return false;
}

/* kernel equivalent to IP_ADD_MEMBERSHIP */
static int vxlan_join_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr	= vxlan->gaddr,
		.imr_ifindex		= vxlan->link,
	};
	int err;

	/* Already a member of group */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast join */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_join_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}


/* kernel equivalent to IP_DROP_MEMBERSHIP */
static int vxlan_leave_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	int err = 0;
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr	= vxlan->gaddr,
		.imr_ifindex		= vxlan->link,
	};

	/* Only leave group when last vxlan is done. */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast leave */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_leave_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *oip;
	struct vxlanhdr *vxh;
	struct vxlan_dev *vxlan;
	struct vxlan_stats *stats;
	__u32 vni;
	int err;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need VXLAN and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
		goto error;

	/* Drop packets with reserved bits set */
	vxh = (struct vxlanhdr *) skb->data;
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	__skb_pull(skb, sizeof(struct vxlanhdr));

	/* Is this VNI defined? */
	vni = ntohl(vxh->vx_vni) >> 8;
	vxlan = vxlan_find_vni(sock_net(sk), vni);
	if (!vxlan) {
		netdev_dbg(skb->dev, "unknown vni %d\n", vni);
		goto drop;
	}

	if (!pskb_may_pull(skb, ETH_HLEN)) {
		vxlan->dev->stats.rx_length_errors++;
		vxlan->dev->stats.rx_errors++;
		goto drop;
	}

	skb_reset_mac_header(skb);

	/* Re-examine inner Ethernet packet */
	oip = ip_hdr(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)
		goto drop;

	if (vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);

	__skb_tunnel_rx(skb, vxlan->dev);
	skb_reset_network_header(skb);

	/* If the NIC driver gave us an encapsulated packet with
	 * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
	 * leave the CHECKSUM_UNNECESSARY, the device checksummed it
	 * for us. Otherwise force the upper layers to verify it.
	 */
	if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
	    !(vxlan->dev->features & NETIF_F_RXCSUM))
		skb->ip_summed = CHECKSUM_NONE;

	skb->encapsulation = 0;

	err = IP_ECN_decapsulate(oip, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &oip->saddr, oip->tos);
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
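
/* Return-value contract (added commentary): the UDP layer treats a
 * return <= 0 from encap_rcv as "packet consumed" and a positive return
 * as "not encapsulated, resume normal UDP processing" -- hence the UDP
 * header is pushed back before returning 1 on the error path above.
 */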

static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && f->remote_ip == 0) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		/* arp_create() allocates an skb and can fail */
		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, tip);
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}

static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;
	struct iphdr *pip;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		break;
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
				dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, pip->daddr);
	return false;
}

/* Extract dsfield from inner protocol */
static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
				   const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

/* Propagate ECN bits out */
static inline u8 vxlan_ecn_encap(u8 tos,
				 const struct iphdr *iph,
				 const struct sk_buff *skb)
{
	u8 inner = vxlan_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}
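
/* ECN note (added commentary): INET_ECN_encapsulate() keeps the DSCP
 * portion of the chosen outer TOS and copies the inner ECN codepoint
 * outward (CE becomes ECT(0)), in the spirit of RFC 6040 normal mode;
 * IP_ECN_decapsulate() on the receive path performs the inverse check.
 */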
791
stephen hemminger1cad8712012-10-09 20:35:49 +0000792static void vxlan_sock_free(struct sk_buff *skb)
793{
794 sock_put(skb->sk);
795}
796
797/* On transmit, associate with the tunnel socket */
798static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
799{
800 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
801 struct sock *sk = vn->sock->sk;
802
803 skb_orphan(skb);
804 sock_hold(sk);
805 skb->sk = sk;
806 skb->destructor = vxlan_sock_free;
807}
808
stephen hemminger05f47d62012-10-09 20:35:50 +0000809/* Compute source port for outgoing packet
810 * first choice to use L4 flow hash since it will spread
811 * better and maybe available from hardware
812 * secondary choice is to use jhash on the Ethernet header
813 */
814static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
815{
816 unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
817 u32 hash;
818
819 hash = skb_get_rxhash(skb);
820 if (!hash)
821 hash = jhash(skb->data, 2 * ETH_ALEN,
822 (__force u32) skb->protocol);
823
824 return (((u64) hash * range) >> 32) + vxlan->port_min;
825}
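
/* Worked example (added commentary): the multiply-shift above maps a
 * 32-bit hash uniformly onto [port_min, port_max] without a modulo:
 * hash * range is a 64-bit product whose top 32 bits equal
 * floor(hash * range / 2^32), i.e. an offset in [0, range - 1].
 * With the common local port range 32768..61000 (range = 28233),
 * hash = 0xffffffff gives offset 28232 and thus port 61000.
 */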

/* Transmit local packets over VXLAN
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * Source port is based on hash of flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt;
	const struct iphdr *old_iph;
	struct ethhdr *eth;
	struct iphdr *iph;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	struct flowi4 fl4;
	unsigned int pkt_len = skb->len;
	__be32 dst;
	__u16 src_port;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;
	bool did_rsc = false;
	const struct vxlan_fdb *f;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
		return arp_reduce(dev, skb);
	else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
		did_rsc = route_shortcircuit(dev, skb);

	f = vxlan_find_mac(vxlan, eth->h_dest);
	if (f == NULL) {
		did_rsc = false;
		dst = vxlan->gaddr;
		if (!dst && (vxlan->flags & VXLAN_F_L2MISS) &&
		    !is_multicast_ether_addr(eth->h_dest))
			vxlan_fdb_miss(vxlan, eth->h_dest);
	} else
		dst = f->remote_ip;

	if (!dst) {
		if (did_rsc) {
			__skb_pull(skb, skb_network_offset(skb));
			skb->ip_summed = CHECKSUM_NONE;
			skb->pkt_type = PACKET_HOST;

			/* short-circuited back to local bridge */
			if (netif_rx(skb) == NET_RX_SUCCESS) {
				struct vxlan_stats *stats =
						this_cpu_ptr(vxlan->stats);

				u64_stats_update_begin(&stats->syncp);
				stats->tx_packets++;
				stats->tx_bytes += pkt_len;
				u64_stats_update_end(&stats->syncp);
			} else {
				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;
			}
			return NETDEV_TX_OK;
		}
		goto drop;
	}

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	/* Need space for new headers (invalidates iph ptr) */
	if (skb_cow_head(skb, VXLAN_HEADROOM))
		goto drop;

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && IN_MULTICAST(ntohl(dst)))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = vxlan_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan, skb);

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = vxlan->link;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.daddr = dst;
	fl4.saddr = vxlan->saddr;

	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = htonl(vxlan->vni << 8);

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = htons(vxlan_port);
	uh->source = htons(src_port);

	uh->len = htons(skb->len);
	uh->check = 0;

	__skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version	= 4;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= df;
	iph->protocol	= IPPROTO_UDP;
	iph->tos	= vxlan_ecn_encap(tos, old_iph, skb);
	iph->daddr	= dst;
	iph->saddr	= fl4.saddr;
	iph->ttl	= ttl ? : ip4_dst_hoplimit(&rt->dst);
	tunnel_ip_select_ident(skb, old_iph, &rt->dst);

	vxlan_set_owner(dev, skb);

	/* See iptunnel_xmit() */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	err = ip_local_out(skb);
	if (likely(net_xmit_eval(err) == 0)) {
		struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += pkt_len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}

/* Set up stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan->stats = alloc_percpu(struct vxlan_stats);
	if (!vxlan->stats)
		return -ENOMEM;

	return 0;
}

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int err;

	if (vxlan->gaddr) {
		err = vxlan_join_group(dev);
		if (err)
			return err;
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	if (vxlan->gaddr)
		vxlan_leave_group(dev);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}

/* Merge per-cpu statistics */
static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
					       struct rtnl_link_stats64 *stats)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_stats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct vxlan_stats *pcpu_stats
			= per_cpu_ptr(vxlan->stats, cpu);

		do {
			start = u64_stats_fetch_begin_bh(&pcpu_stats->syncp);
			memcpy(&tmp, pcpu_stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_bh(&pcpu_stats->syncp, start));

		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;

	stats->multicast = dev->stats.multicast;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_frame_errors = dev->stats.rx_frame_errors;
	stats->rx_errors = dev->stats.rx_errors;

	stats->tx_dropped = dev->stats.tx_dropped;
	stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
	stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
	stats->collisions = dev->stats.collisions;
	stats->tx_errors = dev->stats.tx_errors;

	return stats;
}

/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= vxlan_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};

static void vxlan_free(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	free_percpu(vxlan->stats);
	free_netdev(dev);
}

/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned h;
	int low, high;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = vxlan_free;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->priv_flags	&= ~IFF_XMIT_DST_RELEASE;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	inet_get_local_port_range(&low, &high);
	vxlan->port_min = low;
	vxlan->port_max = high;

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}

static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
};

static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_GROUP]) {
		__be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
		if (!IN_MULTICAST(ntohl(gaddr))) {
			pr_debug("group address is not IPv4 multicast\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}

static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__u32 vni;
	int err;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	if (vxlan_find_vni(net, vni)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}
	vxlan->vni = vni;

	if (data[IFLA_VXLAN_GROUP])
		vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);

	if (data[IFLA_VXLAN_LOCAL])
		vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

	if (data[IFLA_VXLAN_LINK] &&
	    (vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			= __dev_get_by_index(net, vxlan->link);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", vxlan->link);
			return -ENODEV;
		}

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;

		/* update header length based on lower device */
		dev->hard_header_len = lowerdev->hard_header_len +
				       VXLAN_HEADROOM;
	}

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);

	err = register_netdevice(dev);
	if (!err)
		hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));

	return err;
}
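
/* Usage sketch (added commentary): a multicast-mode device is created
 * from userspace with iproute2, e.g.:
 *
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0
 *	ip link set vxlan0 up
 *
 * which arrives here as the IFLA_VXLAN_ID, IFLA_VXLAN_GROUP and
 * IFLA_VXLAN_LINK attributes.
 */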

static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	hlist_del_rcu(&vxlan->hlist);

	unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{

	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(__be32))+	/* IFLA_VXLAN_LOCAL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		0;
}

static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
		goto nla_put_failure;

	if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
		goto nla_put_failure;

	if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
		goto nla_put_failure;

	if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
			!!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
			!!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
			!!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
			!!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};

static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct sock *sk;
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int rc;
	unsigned h;

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
	if (rc < 0) {
		pr_debug("UDP socket create failed\n");
		return rc;
	}
	/* Put in proper namespace */
	sk = vn->sock->sk;
	sk_change_net(sk, net);

	vxlan_addr.sin_port = htons(vxlan_port);

	rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
	if (rc < 0) {
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		sk_release_kernel(sk);
		vn->sock = NULL;
		return rc;
	}

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;

	/* Mark socket as an encapsulation socket. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
	udp_encap_enable();

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->vni_list[h]);

	return 0;
}

static __net_exit void vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	if (vn->sock) {
		sk_release_kernel(vn->sock->sk);
		vn->sock = NULL;
	}
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

static int __init vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_device(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out2;

	return 0;

out2:
	unregister_pernet_device(&vxlan_net_ops);
out1:
	return rc;
}
module_init(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
MODULE_ALIAS_RTNL_LINK("vxlan");