blob: b4a10bcb66a0f62be1606fa34629d120913fc74d [file] [log] [blame]
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001/*
2 * drivers/net/veth.c
3 *
4 * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
5 *
6 * Author: Pavel Emelianov <xemul@openvz.org>
7 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
8 *
9 */
10
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070011#include <linux/netdevice.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090012#include <linux/slab.h>
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070013#include <linux/ethtool.h>
14#include <linux/etherdevice.h>
Eric Dumazetcf05c702011-06-19 22:48:34 -070015#include <linux/u64_stats_sync.h>
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070016
Jiri Pirkof7b12602014-02-18 20:53:18 +010017#include <net/rtnetlink.h>
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070018#include <net/dst.h>
19#include <net/xfrm.h>
Stephen Hemmingerecef9692007-12-25 17:23:59 -080020#include <linux/veth.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040021#include <linux/module.h>
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070022
23#define DRV_NAME "veth"
24#define DRV_VERSION "1.0"
25
Eric Biederman38d40812009-03-03 23:36:04 -080026#define MIN_MTU 68 /* Min L3 MTU */
27#define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */
Eric Biederman38d40812009-03-03 23:36:04 -080028
/*
 * Per-cpu TX counters for one veth endpoint.  Readers use the
 * u64_stats_sync seqcount to obtain consistent 64-bit snapshots
 * on 32-bit hosts.
 */
struct pcpu_vstats {
	u64			packets;	/* frames successfully forwarded to the peer */
	u64			bytes;		/* bytes successfully forwarded to the peer */
	struct u64_stats_sync	syncp;
};
34
/* Private state of one end of a veth pair. */
struct veth_priv {
	struct net_device __rcu	*peer;		/* other end; NULL once unlinked in dellink */
	atomic64_t		dropped;	/* TX drops: peer missing or peer refused the skb */
};
39
/*
 * ethtool interface
 */

/* Names for the values reported by veth_get_ethtool_stats(), in order. */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};
49
50static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
51{
52 cmd->supported = 0;
53 cmd->advertising = 0;
David Decotigny70739492011-04-27 18:32:40 +000054 ethtool_cmd_speed_set(cmd, SPEED_10000);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070055 cmd->duplex = DUPLEX_FULL;
56 cmd->port = PORT_TP;
57 cmd->phy_address = 0;
58 cmd->transceiver = XCVR_INTERNAL;
59 cmd->autoneg = AUTONEG_DISABLE;
60 cmd->maxtxpkt = 0;
61 cmd->maxrxpkt = 0;
62 return 0;
63}
64
65static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
66{
Rick Jones33a5ba12011-11-15 14:59:53 +000067 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
68 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070069}
70
71static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
72{
73 switch(stringset) {
74 case ETH_SS_STATS:
75 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
76 break;
77 }
78}
79
Jeff Garzikb9f2c042007-10-03 18:07:32 -070080static int veth_get_sset_count(struct net_device *dev, int sset)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070081{
Jeff Garzikb9f2c042007-10-03 18:07:32 -070082 switch (sset) {
83 case ETH_SS_STATS:
84 return ARRAY_SIZE(ethtool_stats_keys);
85 default:
86 return -EOPNOTSUPP;
87 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070088}
89
/* ethtool get_ethtool_stats: report our single stat, the peer's ifindex
 * (0 when the pair has already been broken).
 */
static void veth_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *priv = netdev_priv(dev);
	/* ethtool core holds RTNL here, so rtnl_dereference() is sufficient */
	struct net_device *peer = rtnl_dereference(priv->peer);

	data[0] = peer ? peer->ifindex : 0;
}
98
/* ethtool callbacks for a veth endpoint */
static const struct ethtool_ops veth_ethtool_ops = {
	.get_settings		= veth_get_settings,
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
};
107
/*
 * Transmit path: hand the skb directly to the peer device's RX path via
 * dev_forward_skb().  Always returns NETDEV_TX_OK; failures are only
 * accounted in priv->dropped, never reported back to the stack.
 */
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *rcv;
	int length = skb->len;	/* sample length before the skb is consumed below */

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		/* peer already unlinked by veth_dellink() */
		kfree_skb(skb);
		goto drop;
	}
	/* don't change ip_summed == CHECKSUM_PARTIAL, as that
	 * will cause bad checksum on forwarded packets
	 */
	if (skb->ip_summed == CHECKSUM_NONE &&
	    rcv->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
		/* count on the TX side; the peer reads these as its RX stats */
		struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);

		u64_stats_update_begin(&stats->syncp);
		stats->bytes += length;
		stats->packets++;
		u64_stats_update_end(&stats->syncp);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
141
142/*
143 * general routines
144 */
145
/*
 * Sum dev's per-cpu TX counters into *result and return the number of
 * dropped packets.  The fetch_begin/retry seqcount loop yields a
 * consistent 64-bit snapshot even where 64-bit loads are not atomic.
 */
static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int cpu;

	result->packets = 0;
	result->bytes = 0;
	for_each_possible_cpu(cpu) {
		struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->packets += packets;
		result->bytes += bytes;
	}
	return atomic64_read(&priv->dropped);
}
168
/*
 * ndo_get_stats64: this device's TX counters are the pair's traffic in
 * one direction, so our RX numbers are read from the peer's TX counters
 * (left at zero once the pair is broken).
 */
static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
		struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct pcpu_vstats one;

	tot->tx_dropped = veth_stats_one(&one, dev);
	tot->tx_bytes = one.bytes;
	tot->tx_packets = one.packets;

	/* peer may be unlinked concurrently; pin it with RCU while we read */
	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		tot->rx_dropped = veth_stats_one(&one, peer);
		tot->rx_bytes = one.bytes;
		tot->rx_packets = one.packets;
	}
	rcu_read_unlock();

	return tot;
}
191
/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
	/* intentionally empty: there is no hardware filter to program */
}
196
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700197static int veth_open(struct net_device *dev)
198{
Eric Dumazetd0e2c552013-01-04 15:42:40 +0000199 struct veth_priv *priv = netdev_priv(dev);
200 struct net_device *peer = rtnl_dereference(priv->peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700201
Eric Dumazetd0e2c552013-01-04 15:42:40 +0000202 if (!peer)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700203 return -ENOTCONN;
204
Eric Dumazetd0e2c552013-01-04 15:42:40 +0000205 if (peer->flags & IFF_UP) {
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700206 netif_carrier_on(dev);
Eric Dumazetd0e2c552013-01-04 15:42:40 +0000207 netif_carrier_on(peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700208 }
209 return 0;
210}
211
Eric W. Biederman2cf48a12009-02-25 19:47:29 +0000212static int veth_close(struct net_device *dev)
213{
214 struct veth_priv *priv = netdev_priv(dev);
Eric Dumazet2efd32e2013-01-10 08:32:45 +0000215 struct net_device *peer = rtnl_dereference(priv->peer);
Eric W. Biederman2cf48a12009-02-25 19:47:29 +0000216
217 netif_carrier_off(dev);
Eric Dumazet2efd32e2013-01-10 08:32:45 +0000218 if (peer)
219 netif_carrier_off(peer);
Eric W. Biederman2cf48a12009-02-25 19:47:29 +0000220
221 return 0;
222}
223
Eric Biederman38d40812009-03-03 23:36:04 -0800224static int is_valid_veth_mtu(int new_mtu)
225{
Eric Dumazet807540b2010-09-23 05:40:09 +0000226 return new_mtu >= MIN_MTU && new_mtu <= MAX_MTU;
Eric Biederman38d40812009-03-03 23:36:04 -0800227}
228
229static int veth_change_mtu(struct net_device *dev, int new_mtu)
230{
231 if (!is_valid_veth_mtu(new_mtu))
232 return -EINVAL;
233 dev->mtu = new_mtu;
234 return 0;
235}
236
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700237static int veth_dev_init(struct net_device *dev)
238{
WANG Cong1c213bd2014-02-13 11:46:28 -0800239 dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
Eric Dumazet26811282012-12-29 16:02:43 +0000240 if (!dev->vstats)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700241 return -ENOMEM;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700242 return 0;
243}
244
/* destructor (dev->destructor): release per-cpu stats, then the device. */
static void veth_dev_free(struct net_device *dev)
{
	free_percpu(dev->vstats);
	free_netdev(dev);
}
250
/* Device operations for a veth endpoint; teardown runs via veth_dev_free() */
static const struct net_device_ops veth_netdev_ops = {
	.ndo_init            = veth_dev_init,
	.ndo_open            = veth_open,
	.ndo_stop            = veth_close,
	.ndo_start_xmit      = veth_xmit,
	.ndo_change_mtu      = veth_change_mtu,
	.ndo_get_stats64     = veth_get_stats64,
	.ndo_set_rx_mode     = veth_set_multicast_list,
	.ndo_set_mac_address = eth_mac_addr,
};
261
/* Feature flags a veth device advertises (also used for hw_features and
 * hw_enc_features in veth_setup()).
 */
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
		       NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
		       NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | \
		       NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_UFO | \
		       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
		       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
Eric Dumazet80933152012-12-29 16:26:10 +0000268
/* rtnl_link_ops .setup: initialize one endpoint as an Ethernet device. */
static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	/* VLAN tagging flags are stripped from vlan_features */
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->destructor = veth_dev_free;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
}
290
291/*
292 * netlink interface
293 */
294
295static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
296{
297 if (tb[IFLA_ADDRESS]) {
298 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
299 return -EINVAL;
300 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
301 return -EADDRNOTAVAIL;
302 }
Eric Biederman38d40812009-03-03 23:36:04 -0800303 if (tb[IFLA_MTU]) {
304 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
305 return -EINVAL;
306 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700307 return 0;
308}
309
310static struct rtnl_link_ops veth_link_ops;
311
/*
 * rtnl_link_ops .newlink: create both ends of a veth pair.
 *
 * The peer may be described by a nested VETH_INFO_PEER attribute (own
 * name, netns, address, ifindex, ...); otherwise it inherits the
 * top-level attributes.  The peer is created and registered first, then
 * "dev" itself, and finally the two priv->peer pointers are joined.
 * Runs under RTNL.
 */
static int veth_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		/* parse the peer's own IFLA_* attributes, which follow the
		 * struct ifinfomsg header inside VETH_INFO_PEER
		 */
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
					  nla_len(nla_peer) - sizeof(struct ifinfomsg));
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		/* no explicit peer description: reuse dev's attributes */
		ifmp = NULL;
		tbp = tb;
	}

	if (tbp[IFLA_IFNAME])
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");

	/* the peer may live in a different namespace (IFLA_NET_NS_*) */
	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, &veth_link_ops, tbp);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (tbp[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(peer);

	/* honor a requested peer ifindex from VETH_INFO_PEER */
	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note, that since we've registered new device the dev's name
	 * should be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */

	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);
	return 0;

err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}
422
/*
 * rtnl_link_ops .dellink: unregister both ends of the pair.  The
 * priv->peer pointers are cleared first so in-flight RCU readers
 * (veth_xmit, veth_get_stats64) see an unpaired device.
 */
static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}
444
/* Netlink policy: VETH_INFO_PEER must at least hold a struct ifinfomsg. */
static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700448
/* rtnetlink glue: implements "ip link add ... type veth" */
static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
};
459
460/*
461 * init/fini
462 */
463
/* Module init: register the "veth" rtnl link type. */
static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}
468
/* Module exit: unregister the "veth" rtnl link type. */
static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}
473
474module_init(veth_init);
475module_exit(veth_exit);
476
477MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
478MODULE_LICENSE("GPL v2");
479MODULE_ALIAS_RTNL_LINK(DRV_NAME);