/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/veth.h>
#include <linux/module.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define MIN_MTU 68		/* Min L3 MTU */
#define MAX_MTU 65535		/* Max L3 MTU (arbitrary) */

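/*
 * Each device in a veth pair holds an RCU-protected pointer to its peer
 * and per-cpu TX byte/packet counters (dev->vstats); TX drops are
 * accumulated in a single atomic counter.
 */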
struct pcpu_vstats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
};

/*
 * ethtool interface
 */

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = 0;
	cmd->advertising = 0;
	ethtool_cmd_speed_set(cmd, SPEED_10000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_TP;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch(stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	}
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys);
	default:
		return -EOPNOTSUPP;
	}
}

static void veth_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	data[0] = peer ? peer->ifindex : 0;
}

static const struct ethtool_ops veth_ethtool_ops = {
	.get_settings		= veth_get_settings,
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
};

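/*
 * Transmit path: a veth device has no real queue.  A packet handed to
 * veth_xmit() is delivered straight into the peer's receive path via
 * dev_forward_skb(); if the peer has gone away or forwarding fails,
 * the packet is accounted as a TX drop.
 */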
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *rcv;
	int length = skb->len;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}
	/* don't change ip_summed == CHECKSUM_PARTIAL, as that
	 * will cause bad checksum on forwarded packets
	 */
	if (skb->ip_summed == CHECKSUM_NONE &&
	    rcv->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
		struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);

		u64_stats_update_begin(&stats->syncp);
		stats->bytes += length;
		stats->packets++;
		u64_stats_update_end(&stats->syncp);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

/*
 * general routines
 */

static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int cpu;

	result->packets = 0;
	result->bytes = 0;
	for_each_possible_cpu(cpu) {
		struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->packets += packets;
		result->bytes += bytes;
	}
	return atomic64_read(&priv->dropped);
}

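/*
 * Because every packet transmitted on one device is received by its peer,
 * the RX counters reported here are simply the peer's TX counters (and
 * vice versa).
 */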
static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
						  struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct pcpu_vstats one;

	tot->tx_dropped = veth_stats_one(&one, dev);
	tot->tx_bytes = one.bytes;
	tot->tx_packets = one.packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		tot->rx_dropped = veth_stats_one(&one, peer);
		tot->rx_bytes = one.bytes;
		tot->rx_packets = one.packets;
	}
	rcu_read_unlock();

	return tot;
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	if (!peer)
		return -ENOTCONN;

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	return 0;
}

static int is_valid_veth_mtu(int new_mtu)
{
	return new_mtu >= MIN_MTU && new_mtu <= MAX_MTU;
}

static int veth_change_mtu(struct net_device *dev, int new_mtu)
{
	if (!is_valid_veth_mtu(new_mtu))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static int veth_dev_init(struct net_device *dev)
{
	dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
	if (!dev->vstats)
		return -ENOMEM;
	return 0;
}

static void veth_dev_free(struct net_device *dev)
{
	free_percpu(dev->vstats);
	free_netdev(dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one
	 * Since it's a synchronous operation, we are guaranteed
	 * never to have pending data when we poll for it so
	 * there is nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole
	 */
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

static const struct net_device_ops veth_netdev_ops = {
	.ndo_init            = veth_dev_init,
	.ndo_open            = veth_open,
	.ndo_stop            = veth_close,
	.ndo_start_xmit      = veth_xmit,
	.ndo_change_mtu      = veth_change_mtu,
	.ndo_get_stats64     = veth_get_stats64,
	.ndo_set_rx_mode     = veth_set_multicast_list,
	.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= veth_poll_controller,
#endif
	.ndo_get_iflink		= veth_get_iflink,
	.ndo_features_check	= passthru_features_check,
};

#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |    \
		       NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
		       NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |	     \
		       NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_UFO |   \
		       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |  \
		       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )

static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->destructor = veth_dev_free;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
}

/*
 * netlink interface
 */

static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;

static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
					  nla_len(nla_peer) - sizeof(struct ifinfomsg));
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (tbp[IFLA_IFNAME]) {
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (tbp[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(peer);

	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note, that since we've registered new device the dev's name
	 * should be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */

	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);
	return 0;

err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};

static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
	.get_link_net	= veth_get_link_net,
};
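
/*
 * Example userspace usage (an illustrative iproute2 invocation, not part
 * of this driver; device names are arbitrary):
 *
 *	ip link add veth0 type veth peer name veth1
 *
 * creates a connected pair of devices; frames transmitted on either device
 * appear as received frames on its peer.  The peer's own attributes (name,
 * address, network namespace) arrive in the VETH_INFO_PEER attribute
 * handled by veth_newlink() above.
 */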

/*
 * init/fini
 */

static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);