blob: ee7460ee3d050c943ed7cb524e55415beca329a4 [file] [log] [blame]
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */
10
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070011#include <linux/netdevice.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090012#include <linux/slab.h>
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070013#include <linux/ethtool.h>
14#include <linux/etherdevice.h>
Eric Dumazetcf05c702011-06-19 22:48:34 -070015#include <linux/u64_stats_sync.h>
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070016
Jiri Pirkof7b12602014-02-18 20:53:18 +010017#include <net/rtnetlink.h>
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070018#include <net/dst.h>
19#include <net/xfrm.h>
Stephen Hemmingerecef9692007-12-25 17:23:59 -080020#include <linux/veth.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040021#include <linux/module.h>
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070022
#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

/* MTU limits enforced on both ends of the pair (see is_valid_veth_mtu()). */
#define MIN_MTU 68		/* Min L3 MTU */
#define MAX_MTU 65535		/* Max L3 MTU (arbitrary) */
/* Per-CPU TX counters; readers use the syncp sequence counter to get a
 * consistent 64-bit snapshot on 32-bit hosts (see veth_stats_one()).
 */
struct pcpu_vstats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};
34
struct veth_priv {
	struct net_device __rcu	*peer;		/* other end of the pair; NULL once unlinked */
	atomic64_t		dropped;	/* TX drops (no peer or peer rejected skb) */
	unsigned		requested_headroom; /* headroom asked of this end, see veth_set_rx_headroom() */
};
40
/*
 * ethtool interface
 */
44
/* Names for the ethtool statistics; order must match veth_get_ethtool_stats(). */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};
50
51static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
52{
53 cmd->supported = 0;
54 cmd->advertising = 0;
David Decotigny70739492011-04-27 18:32:40 +000055 ethtool_cmd_speed_set(cmd, SPEED_10000);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070056 cmd->duplex = DUPLEX_FULL;
57 cmd->port = PORT_TP;
58 cmd->phy_address = 0;
59 cmd->transceiver = XCVR_INTERNAL;
60 cmd->autoneg = AUTONEG_DISABLE;
61 cmd->maxtxpkt = 0;
62 cmd->maxrxpkt = 0;
63 return 0;
64}
65
66static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
67{
Rick Jones33a5ba12011-11-15 14:59:53 +000068 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
69 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070070}
71
72static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
73{
74 switch(stringset) {
75 case ETH_SS_STATS:
76 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
77 break;
78 }
79}
80
Jeff Garzikb9f2c042007-10-03 18:07:32 -070081static int veth_get_sset_count(struct net_device *dev, int sset)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070082{
Jeff Garzikb9f2c042007-10-03 18:07:32 -070083 switch (sset) {
84 case ETH_SS_STATS:
85 return ARRAY_SIZE(ethtool_stats_keys);
86 default:
87 return -EOPNOTSUPP;
88 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -070089}
90
/* Fill the single ethtool stat: the peer's ifindex, or 0 when the pair
 * has been broken (peer pointer cleared under RTNL in veth_dellink()).
 */
static void veth_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	data[0] = peer ? peer->ifindex : 0;
}
99
/* ethtool entry points; link state comes from the generic carrier helper. */
static const struct ethtool_ops veth_ethtool_ops = {
	.get_settings		= veth_get_settings,
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
};
108
/* Transmit path: hand the skb straight to the peer device's RX path.
 * Runs under rcu_read_lock() so the peer cannot be freed underneath us
 * while the pair is being torn down (see veth_dellink()).
 */
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *rcv;
	int length = skb->len;	/* cache: dev_forward_skb() consumes the skb */

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		/* pair already broken: free the skb and count a drop */
		kfree_skb(skb);
		goto drop;
	}

	if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
		struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);

		/* per-CPU counters, guarded by syncp for 32-bit readers */
		u64_stats_update_begin(&stats->syncp);
		stats->bytes += length;
		stats->packets++;
		u64_stats_update_end(&stats->syncp);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
136
/*
 * general routines
 */
140
/* Sum @dev's per-CPU TX counters into @result and return its drop count.
 * The fetch_begin/retry loop re-reads a CPU's counters if a concurrent
 * writer touched them mid-read, giving a torn-free 64-bit snapshot.
 */
static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int cpu;

	result->packets = 0;
	result->bytes = 0;
	for_each_possible_cpu(cpu) {
		struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->packets += packets;
		result->bytes += bytes;
	}
	return atomic64_read(&priv->dropped);
}
163
/* ndo_get_stats64: this device's TX totals are its own counters; its RX
 * totals are, by construction of the pipe, whatever the peer transmitted.
 * If the peer is already gone the rx_* fields are simply left untouched.
 */
static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
						  struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct pcpu_vstats one;

	tot->tx_dropped = veth_stats_one(&one, dev);
	tot->tx_bytes = one.bytes;
	tot->tx_packets = one.packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		tot->rx_dropped = veth_stats_one(&one, peer);
		tot->rx_bytes = one.bytes;
		tot->rx_packets = one.packets;
	}
	rcu_read_unlock();

	return tot;
}
186
/* fake multicast ability: intentional no-op — there is no hardware filter
 * to program, every frame is handed to the peer unmodified (veth_xmit()).
 */
static void veth_set_multicast_list(struct net_device *dev)
{
}
191
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700192static int veth_open(struct net_device *dev)
193{
Eric Dumazetd0e2c552013-01-04 15:42:40 +0000194 struct veth_priv *priv = netdev_priv(dev);
195 struct net_device *peer = rtnl_dereference(priv->peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700196
Eric Dumazetd0e2c552013-01-04 15:42:40 +0000197 if (!peer)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700198 return -ENOTCONN;
199
Eric Dumazetd0e2c552013-01-04 15:42:40 +0000200 if (peer->flags & IFF_UP) {
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700201 netif_carrier_on(dev);
Eric Dumazetd0e2c552013-01-04 15:42:40 +0000202 netif_carrier_on(peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700203 }
204 return 0;
205}
206
/* ndo_stop: downing one end kills carrier on both ends of the pipe.
 * The peer may already be NULL during pair teardown.
 */
static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	return 0;
}
218
Eric Biederman38d40812009-03-03 23:36:04 -0800219static int is_valid_veth_mtu(int new_mtu)
220{
Eric Dumazet807540b2010-09-23 05:40:09 +0000221 return new_mtu >= MIN_MTU && new_mtu <= MAX_MTU;
Eric Biederman38d40812009-03-03 23:36:04 -0800222}
223
224static int veth_change_mtu(struct net_device *dev, int new_mtu)
225{
226 if (!is_valid_veth_mtu(new_mtu))
227 return -EINVAL;
228 dev->mtu = new_mtu;
229 return 0;
230}
231
/* ndo_init: allocate the per-CPU TX counters before the device goes live. */
static int veth_dev_init(struct net_device *dev)
{
	dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
	if (!dev->vstats)
		return -ENOMEM;
	return 0;
}
239
/* Device destructor: release the per-CPU stats, then the netdev itself. */
static void veth_dev_free(struct net_device *dev)
{
	free_percpu(dev->vstats);
	free_netdev(dev);
}
245
WANG Congbb446c12014-06-23 15:36:02 -0700246#ifdef CONFIG_NET_POLL_CONTROLLER
247static void veth_poll_controller(struct net_device *dev)
248{
249 /* veth only receives frames when its peer sends one
250 * Since it's a synchronous operation, we are guaranteed
251 * never to have pending data when we poll for it so
252 * there is nothing to do here.
253 *
254 * We need this though so netpoll recognizes us as an interface that
255 * supports polling, which enables bridge devices in virt setups to
256 * still use netconsole
257 */
258}
259#endif /* CONFIG_NET_POLL_CONTROLLER */
260
Nicolas Dichtela45253b2015-04-02 17:07:11 +0200261static int veth_get_iflink(const struct net_device *dev)
262{
263 struct veth_priv *priv = netdev_priv(dev);
264 struct net_device *peer;
265 int iflink;
266
267 rcu_read_lock();
268 peer = rcu_dereference(priv->peer);
269 iflink = peer ? peer->ifindex : 0;
270 rcu_read_unlock();
271
272 return iflink;
273}
274
/* ndo_set_rx_headroom: record the headroom requested of this end, then
 * set both ends' needed_headroom to the max of the two requests so a
 * frame injected on either side has room for the other's encapsulation.
 */
static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	/* negative means "no preference": treat as zero */
	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}
297
/* net_device_ops table; MAC changes use the generic ethernet helper and
 * feature checks are passed through untouched.
 */
static const struct net_device_ops veth_netdev_ops = {
	.ndo_init            = veth_dev_init,
	.ndo_open            = veth_open,
	.ndo_stop            = veth_close,
	.ndo_start_xmit      = veth_xmit,
	.ndo_change_mtu      = veth_change_mtu,
	.ndo_get_stats64     = veth_get_stats64,
	.ndo_set_rx_mode     = veth_set_multicast_list,
	.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= veth_poll_controller,
#endif
	.ndo_get_iflink		= veth_get_iflink,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= veth_set_rx_headroom,
};
314
/* Offload feature set advertised on both ends (also used for hw_features
 * and hw_enc_features in veth_setup()).
 */
#define VETH_FEATURES	(NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
			 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
			 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
			 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
Eric Dumazet80933152012-12-29 16:26:10 +0000320
/* rtnl_link_ops ->setup: initialize one end of the pair as an ethernet
 * device with no TX queue (frames go straight to the peer) and the full
 * software offload feature set.
 */
static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	/* VLAN offloads are per-device and must not leak to stacked vlans */
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->destructor = veth_dev_free;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}
345
/*
 * netlink interface
 */
349
350static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
351{
352 if (tb[IFLA_ADDRESS]) {
353 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
354 return -EINVAL;
355 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
356 return -EADDRNOTAVAIL;
357 }
Eric Biederman38d40812009-03-03 23:36:04 -0800358 if (tb[IFLA_MTU]) {
359 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
360 return -EINVAL;
361 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700362 return 0;
363}
364
365static struct rtnl_link_ops veth_link_ops;
366
/* rtnl_link_ops ->newlink: create a veth pair.
 *
 * The peer device is created and registered first (possibly in another
 * netns selected by IFLA_NET_NS_*), then @dev itself; finally the two
 * priv->peer pointers are tied together.  Errors unwind in reverse.
 */
static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		/* VETH_INFO_PEER nests an ifinfomsg + IFLA_* attrs for the peer */
		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
					  nla_len(nla_peer) - sizeof(struct ifinfomsg));
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		/* no explicit peer config: reuse this end's attributes */
		ifmp = NULL;
		tbp = tb;
	}

	if (tbp[IFLA_IFNAME]) {
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (tbp[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(peer);

	/* honor an explicitly requested peer ifindex */
	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	/* keep GSO limits consistent across the pair */
	peer->gso_max_size = dev->gso_max_size;
	peer->gso_max_segs = dev->gso_max_segs;

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note, that since we've registered new device the dev's name
	 * should be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */

	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);
	return 0;

err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}
485
/* rtnl_link_ops ->dellink: break the pair and queue both devices for
 * unregistration.  The peer pointers are cleared first so veth_xmit()
 * (under RCU) sees NULL instead of a device about to be freed.
 */
static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}
507
/* Netlink policy: VETH_INFO_PEER carries an ifinfomsg (plus nested IFLA_*
 * attributes) describing the peer device, parsed in veth_newlink().
 */
static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};
Pavel Emelyanove314dbd2007-09-25 16:14:46 -0700511
Nicolas Dichtele5f4e7b2015-01-20 15:15:46 +0100512static struct net *veth_get_link_net(const struct net_device *dev)
513{
514 struct veth_priv *priv = netdev_priv(dev);
515 struct net_device *peer = rtnl_dereference(priv->peer);
516
517 return peer ? dev_net(peer) : dev_net(dev);
518}
519
/* rtnl_link_ops glue: "ip link add ... type veth" is dispatched here. */
static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
	.get_link_net	= veth_get_link_net,
};
531
/*
 * init/fini
 */
535
/* Module load: register the "veth" rtnl link type. */
static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}
540
/* Module unload: unregister the "veth" rtnl link type. */
static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}
545
module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
/* allows "ip link add type veth" to auto-load this module */
MODULE_ALIAS_RTNL_LINK(DRV_NAME);