#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>

/*
 * A macvtap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
};

#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define MACVTAP_VNET_LE 0x80000000

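/*
 * Helpers to convert virtio16 header fields to and from CPU byte order,
 * honouring the per-queue MACVTAP_VNET_LE flag.
 */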
static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(q->flags & MACVTAP_VNET_LE, val);
}

static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
{
	return __cpu_to_virtio16(q->flags & MACVTAP_VNET_LE, val);
}

static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct macvtap_queue),
};

/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)

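/* Called under rcu_read_lock(): the macvlan_dev is stored as the lower
 * device's rx_handler_data when the link is created.
 */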
static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled; the
 * pointers from one to the other can only be read while rcu_read_lock
 * or the rtnl lock is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

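/* Requires RTNL: put a previously detached queue back into the
 * vlan->taps[] array so that it is eligible for packet delivery again.
 */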
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}

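/* Requires RTNL: take the queue out of vlan->taps[] by moving the last
 * enabled queue into its slot, keeping the array dense for
 * macvtap_get_queue().
 */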
static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the rtnl lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the flow hash of this packet. If no hash is
 * available, fall back to the rx queue recorded by the device on which
 * the packet arrived. If that fails as well, use the first available
 * queue. Cache vlan->numvtaps since it can become zero during the
 * execution of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
	int i, j = 0;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		qlist[j++] = q;
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
	}
	for (i = 0; i < vlan->numvtaps; i++)
		RCU_INIT_POINTER(vlan->taps[i], NULL);
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;

	for (--j; j >= 0; j--)
		sock_put(&qlist[j]->sk);
}

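/*
 * rx handler for the lower device: runs under rcu_read_lock(), picks a
 * queue for the packet and appends it to that queue's socket receive
 * queue, segmenting or checksumming in software first if the tap reader
 * did not negotiate the corresponding offloads.
 */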
static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(dev, skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			skb_queue_tail(&q->sk.sk_receive_queue, skb);
			goto wake_up;
		}

		kfree_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			skb_queue_tail(&q->sk.sk_receive_queue, segs);
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_ALL_CSUM) &&
		    skb_checksum_help(skb))
			goto drop;
		skb_queue_tail(&q->sk.sk_receive_queue, skb);
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

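/*
 * Minor number management: each macvtap link gets a unique character
 * device minor from the minor_idr IDR, serialized by minor_lock.
 */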
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

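/*
 * rtnl_link_ops hook: set up the queue list and offload defaults,
 * register the rx handler on the new device, then let macvlan create
 * the link.
 */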
static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err;

	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
	if (err)
		return err;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data);
}

static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	netdev_rx_handler_unregister(dev);
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}

static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind		= "macvtap",
	.setup		= macvtap_setup,
	.newlink	= macvtap_newlink,
	.dellink	= macvtap_dellink,
};

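/*
 * Wake up processes sleeping in poll() or blocked on write once the
 * socket's send buffer has room again.
 */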
static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}

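/*
 * open() of the macvtap character device: look up the macvlan device by
 * the character device minor, allocate a queue backed by a raw socket
 * and attach it to the device under RTNL.
 */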
static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_macvtap_minor(iminor(inode));
	if (!dev)
		goto out;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto);
	if (!q)
		goto out;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM's virtio_net uses macvtap; enable zero copy between
	 * the guest kernel and the host kernel when the lower device supports
	 * zerocopy.
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = macvtap_set_queue(dev, file, q);
	if (err)
		sock_put(&q->sk);

out:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

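/*
 * Allocate an skb for a packet of @len bytes with @prepad bytes of
 * headroom, keeping at most @linear bytes in the linear area and the
 * rest in paged fragments.
 */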
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
				     struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
{
	unsigned short gso_type = 0;
	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			gso_type = SKB_GSO_UDP;
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start),
					  macvtap16_to_cpu(q, vnet_hdr->csum_offset)))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size);
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}
	return 0;
}

static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
				    const struct sk_buff *skb,
				    struct virtio_net_hdr *vnet_hdr)
{
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb));
		vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size);
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (sinfo->gso_type & SKB_GSO_UDP)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		if (skb_vlan_tag_present(skb))
			vnet_hdr->csum_start = cpu_to_macvtap16(q,
				skb_checksum_start_offset(skb) + VLAN_HLEN);
		else
			vnet_hdr->csum_start = cpu_to_macvtap16(q,
				skb_checksum_start_offset(skb));
		vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset);
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	bool zerocopy = false;
	size_t linear;
	ssize_t n;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
		if (n != sizeof(vnet_hdr))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    macvtap16_to_cpu(q, vnet_hdr.csum_start) +
		    macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     macvtap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_macvtap16(q,
				 macvtap16_to_cpu(q, vnet_hdr.csum_start) +
				 macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
			linear = good_linear;
		else
			linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
	}

	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else {
		err = skb_copy_datagram_from_iter(skb, 0, from, len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg, false);
		}
	}

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr);
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}

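/* write() path of the character device: inject the user buffer into the
 * macvlan via macvtap_get_user().
 */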
static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;

	return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = q->vnet_hdr_sz;
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr);

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

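/*
 * Dequeue one packet from the socket receive queue and copy it to user
 * space, sleeping when the queue is empty unless the file is in
 * non-blocking mode.
 */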
Zhi Yong Wu | 55ec8e2 | 2013-12-07 04:13:05 +0800 | [diff] [blame] | 851 | static ssize_t macvtap_do_read(struct macvtap_queue *q, |
Al Viro | 3af0bfe | 2014-11-07 14:13:53 -0500 | [diff] [blame] | 852 | struct iov_iter *to, |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 853 | int noblock) |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 854 | { |
Hong zhi guo | ccf7e72 | 2012-06-06 22:36:27 +0000 | [diff] [blame] | 855 | DEFINE_WAIT(wait); |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 856 | struct sk_buff *skb; |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 857 | ssize_t ret = 0; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 858 | |
Al Viro | 3af0bfe | 2014-11-07 14:13:53 -0500 | [diff] [blame] | 859 | if (!iov_iter_count(to)) |
| 860 | return 0; |
| 861 | |
| 862 | while (1) { |
Jason Wang | 89cee91 | 2013-06-05 23:54:34 +0000 | [diff] [blame] | 863 | if (!noblock) |
| 864 | prepare_to_wait(sk_sleep(&q->sk), &wait, |
| 865 | TASK_INTERRUPTIBLE); |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 866 | |
| 867 | /* Read frames from the queue */ |
| 868 | skb = skb_dequeue(&q->sk.sk_receive_queue); |
Al Viro | 3af0bfe | 2014-11-07 14:13:53 -0500 | [diff] [blame] | 869 | if (skb) |
| 870 | break; |
| 871 | if (noblock) { |
| 872 | ret = -EAGAIN; |
| 873 | break; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 874 | } |
Al Viro | 3af0bfe | 2014-11-07 14:13:53 -0500 | [diff] [blame] | 875 | if (signal_pending(current)) { |
| 876 | ret = -ERESTARTSYS; |
| 877 | break; |
| 878 | } |
| 879 | /* Nothing to read, let's sleep */ |
| 880 | schedule(); |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 881 | } |
Al Viro | 3af0bfe | 2014-11-07 14:13:53 -0500 | [diff] [blame] | 882 | if (skb) { |
| 883 | ret = macvtap_put_user(q, skb, to); |
Jason Wang | f51a5e8 | 2014-12-01 16:53:15 +0800 | [diff] [blame] | 884 | if (unlikely(ret < 0)) |
| 885 | kfree_skb(skb); |
| 886 | else |
| 887 | consume_skb(skb); |
Al Viro | 3af0bfe | 2014-11-07 14:13:53 -0500 | [diff] [blame] | 888 | } |
Jason Wang | 89cee91 | 2013-06-05 23:54:34 +0000 | [diff] [blame] | 889 | if (!noblock) |
| 890 | finish_wait(sk_sleep(&q->sk), &wait); |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 891 | return ret; |
| 892 | } |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 893 | |
Al Viro | 3af0bfe | 2014-11-07 14:13:53 -0500 | [diff] [blame] | 894 | static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to) |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 895 | { |
| 896 | struct file *file = iocb->ki_filp; |
| 897 | struct macvtap_queue *q = file->private_data; |
Al Viro | 3af0bfe | 2014-11-07 14:13:53 -0500 | [diff] [blame] | 898 | ssize_t len = iov_iter_count(to), ret; |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 899 | |
Al Viro | 3af0bfe | 2014-11-07 14:13:53 -0500 | [diff] [blame] | 900 | ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK); |
Jason Wang | ce232ce | 2013-12-11 13:08:34 +0800 | [diff] [blame] | 901 | ret = min_t(ssize_t, ret, len); |
Zhi Yong Wu | e6ebc7f | 2013-12-06 14:16:50 +0800 | [diff] [blame] | 902 | if (ret > 0) |
| 903 | iocb->ki_pos = ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 904 | return ret; |
| 905 | } |
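
/*
 * Minimal read-side sketch (an assumption, not code from this driver): each
 * read() on a queue fd returns at most one frame, prefixed by a
 * struct virtio_net_hdr when IFF_VNET_HDR was negotiated; a frame that does
 * not fit in the supplied buffer is truncated rather than split across reads.
 *
 *	char buf[65536 + sizeof(struct virtio_net_hdr)];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 */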
| 906 | |
Jason Wang | 8f475a3 | 2013-06-05 23:54:36 +0000 | [diff] [blame] | 907 | static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q) |
| 908 | { |
| 909 | struct macvlan_dev *vlan; |
| 910 | |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 911 | ASSERT_RTNL(); |
| 912 | vlan = rtnl_dereference(q->vlan); |
Jason Wang | 8f475a3 | 2013-06-05 23:54:36 +0000 | [diff] [blame] | 913 | if (vlan) |
| 914 | dev_hold(vlan->dev); |
Jason Wang | 8f475a3 | 2013-06-05 23:54:36 +0000 | [diff] [blame] | 915 | |
| 916 | return vlan; |
| 917 | } |
| 918 | |
| 919 | static void macvtap_put_vlan(struct macvlan_dev *vlan) |
| 920 | { |
| 921 | dev_put(vlan->dev); |
| 922 | } |
| 923 | |
Jason Wang | 815f236 | 2013-06-05 23:54:39 +0000 | [diff] [blame] | 924 | static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags) |
| 925 | { |
| 926 | struct macvtap_queue *q = file->private_data; |
| 927 | struct macvlan_dev *vlan; |
| 928 | int ret; |
| 929 | |
| 930 | vlan = macvtap_get_vlan(q); |
| 931 | if (!vlan) |
| 932 | return -EINVAL; |
| 933 | |
| 934 | if (flags & IFF_ATTACH_QUEUE) |
| 935 | ret = macvtap_enable_queue(vlan->dev, file, q); |
| 936 | else if (flags & IFF_DETACH_QUEUE) |
| 937 | ret = macvtap_disable_queue(q); |
Jason Wang | f57855a | 2013-06-13 14:23:36 +0800 | [diff] [blame] | 938 | else |
| 939 | ret = -EINVAL; |
Jason Wang | 815f236 | 2013-06-05 23:54:39 +0000 | [diff] [blame] | 940 | |
| 941 | macvtap_put_vlan(vlan); |
| 942 | return ret; |
| 943 | } |
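
/*
 * Illustrative sketch (not from this file): with IFF_MULTI_QUEUE, user space
 * can park an open queue and later resume it via TUNSETQUEUE, which is
 * handled by macvtap_ioctl_set_queue() above.  A hypothetical caller:
 *
 *	struct ifreq ifr = { .ifr_flags = IFF_DETACH_QUEUE };
 *
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	(queue stops receiving frames)
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	(the same queue is re-enabled)
 */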
| 944 | |
Vlad Yasevich | 2be5c76 | 2013-06-25 16:04:21 -0400 | [diff] [blame] | 945 | static int set_offload(struct macvtap_queue *q, unsigned long arg) |
| 946 | { |
| 947 | struct macvlan_dev *vlan; |
| 948 | netdev_features_t features; |
| 949 | netdev_features_t feature_mask = 0; |
| 950 | |
| 951 | vlan = rtnl_dereference(q->vlan); |
| 952 | if (!vlan) |
| 953 | return -ENOLINK; |
| 954 | |
| 955 | features = vlan->dev->features; |
| 956 | |
| 957 | if (arg & TUN_F_CSUM) { |
| 958 | feature_mask = NETIF_F_HW_CSUM; |
| 959 | |
| 960 | if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) { |
| 961 | if (arg & TUN_F_TSO_ECN) |
| 962 | feature_mask |= NETIF_F_TSO_ECN; |
| 963 | if (arg & TUN_F_TSO4) |
| 964 | feature_mask |= NETIF_F_TSO; |
| 965 | if (arg & TUN_F_TSO6) |
| 966 | feature_mask |= NETIF_F_TSO6; |
| 967 | } |
Vlad Yasevich | e3e3c42 | 2015-02-03 16:36:17 -0500 | [diff] [blame] | 968 | |
| 969 | if (arg & TUN_F_UFO) |
| 970 | feature_mask |= NETIF_F_UFO; |
Vlad Yasevich | 2be5c76 | 2013-06-25 16:04:21 -0400 | [diff] [blame] | 971 | } |
| 972 | |
 | 973 | /* The tun/tap driver interprets the TSO offload bits from the
 | 974 | * receiver's point of view: setting a bit means user space is
 | 975 | * willing to accept TSO frames, clearing it means it is not.
 | 976 | * macvtap has to express the same policy as device features:
 | 977 | * when user space turns TSO off, we also clear the receive
 | 978 | * offloads (RX_OFFLOADS) on the macvlan device so that user
 | 979 | * space is never handed TSO-sized frames.
 | 980 | */
Vlad Yasevich | e3e3c42 | 2015-02-03 16:36:17 -0500 | [diff] [blame] | 981 | if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) |
Vlad Yasevich | 2be5c76 | 2013-06-25 16:04:21 -0400 | [diff] [blame] | 982 | features |= RX_OFFLOADS; |
| 983 | else |
| 984 | features &= ~RX_OFFLOADS; |
| 985 | |
| 986 | /* tap_features are the same as features on tun/tap and |
| 987 | * reflect user expectations. |
| 988 | */ |
Vlad Yasevich | a567dd6 | 2013-08-16 15:25:00 -0400 | [diff] [blame] | 989 | vlan->tap_features = feature_mask; |
Vlad Yasevich | 2be5c76 | 2013-06-25 16:04:21 -0400 | [diff] [blame] | 990 | vlan->set_features = features; |
| 991 | netdev_update_features(vlan->dev); |
| 992 | |
| 993 | return 0; |
| 994 | } |
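
/*
 * Illustrative sketch (an assumption, not part of this driver): user space
 * negotiates these offloads with TUNSETOFFLOAD, passing the TUN_F_* bits
 * directly as the ioctl argument; set_offload() then translates them into
 * NETIF_F_* features on the macvlan device.
 *
 *	unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *
 *	if (ioctl(tap_fd, TUNSETOFFLOAD, offloads) < 0)
 *		perror("TUNSETOFFLOAD");
 */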
| 995 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 996 | /* |
 | 997 |  * Provide compatibility with the generic tun/tap ioctl interface.
| 998 | */ |
| 999 | static long macvtap_ioctl(struct file *file, unsigned int cmd, |
| 1000 | unsigned long arg) |
| 1001 | { |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 1002 | struct macvtap_queue *q = file->private_data; |
| 1003 | struct macvlan_dev *vlan; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1004 | void __user *argp = (void __user *)arg; |
| 1005 | struct ifreq __user *ifr = argp; |
| 1006 | unsigned int __user *up = argp; |
Michael S. Tsirkin | 39ec7de | 2014-12-16 15:04:56 +0200 | [diff] [blame] | 1007 | unsigned short u; |
Michael S. Tsirkin | 55afbd0 | 2010-04-29 13:50:48 +0300 | [diff] [blame] | 1008 | int __user *sp = argp; |
| 1009 | int s; |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 1010 | int ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1011 | |
| 1012 | switch (cmd) { |
| 1013 | case TUNSETIFF: |
| 1014 | /* ignore the name, just look at flags */ |
| 1015 | if (get_user(u, &ifr->ifr_flags)) |
| 1016 | return -EFAULT; |
Arnd Bergmann | b9fb9ee | 2010-02-18 05:48:17 +0000 | [diff] [blame] | 1017 | |
| 1018 | ret = 0; |
Michael S. Tsirkin | 6ae7feb | 2014-11-23 17:24:15 +0200 | [diff] [blame] | 1019 | if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP)) |
Arnd Bergmann | b9fb9ee | 2010-02-18 05:48:17 +0000 | [diff] [blame] | 1020 | ret = -EINVAL; |
| 1021 | else |
Michael S. Tsirkin | 39ec7de | 2014-12-16 15:04:56 +0200 | [diff] [blame] | 1022 | q->flags = (q->flags & ~MACVTAP_FEATURES) | u; |
Arnd Bergmann | b9fb9ee | 2010-02-18 05:48:17 +0000 | [diff] [blame] | 1023 | |
| 1024 | return ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1025 | |
| 1026 | case TUNGETIFF: |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 1027 | rtnl_lock(); |
Jason Wang | 8f475a3 | 2013-06-05 23:54:36 +0000 | [diff] [blame] | 1028 | vlan = macvtap_get_vlan(q); |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 1029 | if (!vlan) { |
| 1030 | rtnl_unlock(); |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 1031 | return -ENOLINK; |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 1032 | } |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 1033 | |
| 1034 | ret = 0; |
Michael S. Tsirkin | 39ec7de | 2014-12-16 15:04:56 +0200 | [diff] [blame] | 1035 | u = q->flags; |
Eric Dumazet | 13707f9 | 2011-01-26 19:28:23 +0000 | [diff] [blame] | 1036 | if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || |
Michael S. Tsirkin | 39ec7de | 2014-12-16 15:04:56 +0200 | [diff] [blame] | 1037 | put_user(u, &ifr->ifr_flags)) |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 1038 | ret = -EFAULT; |
Jason Wang | 8f475a3 | 2013-06-05 23:54:36 +0000 | [diff] [blame] | 1039 | macvtap_put_vlan(vlan); |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 1040 | rtnl_unlock(); |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 1041 | return ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1042 | |
Jason Wang | 815f236 | 2013-06-05 23:54:39 +0000 | [diff] [blame] | 1043 | case TUNSETQUEUE: |
| 1044 | if (get_user(u, &ifr->ifr_flags)) |
| 1045 | return -EFAULT; |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 1046 | rtnl_lock(); |
| 1047 | ret = macvtap_ioctl_set_queue(file, u); |
| 1048 | rtnl_unlock(); |
Jason Wang | 82a19eb | 2013-07-16 13:36:33 +0800 | [diff] [blame] | 1049 | return ret; |
Jason Wang | 815f236 | 2013-06-05 23:54:39 +0000 | [diff] [blame] | 1050 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1051 | case TUNGETFEATURES: |
Michael S. Tsirkin | 6ae7feb | 2014-11-23 17:24:15 +0200 | [diff] [blame] | 1052 | if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up)) |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1053 | return -EFAULT; |
| 1054 | return 0; |
| 1055 | |
| 1056 | case TUNSETSNDBUF: |
 | 1057 | 		if (get_user(s, sp))
| 1058 | return -EFAULT; |
| 1059 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1060 | 		q->sk.sk_sndbuf = s;
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1061 | return 0; |
| 1062 | |
Michael S. Tsirkin | 55afbd0 | 2010-04-29 13:50:48 +0300 | [diff] [blame] | 1063 | case TUNGETVNETHDRSZ: |
| 1064 | s = q->vnet_hdr_sz; |
| 1065 | if (put_user(s, sp)) |
| 1066 | return -EFAULT; |
| 1067 | return 0; |
| 1068 | |
| 1069 | case TUNSETVNETHDRSZ: |
| 1070 | if (get_user(s, sp)) |
| 1071 | return -EFAULT; |
| 1072 | if (s < (int)sizeof(struct virtio_net_hdr)) |
| 1073 | return -EINVAL; |
| 1074 | |
| 1075 | q->vnet_hdr_sz = s; |
| 1076 | return 0; |
| 1077 | |
Michael S. Tsirkin | 01b07fb | 2014-12-16 15:05:10 +0200 | [diff] [blame] | 1078 | case TUNGETVNETLE: |
| 1079 | s = !!(q->flags & MACVTAP_VNET_LE); |
| 1080 | if (put_user(s, sp)) |
| 1081 | return -EFAULT; |
| 1082 | return 0; |
| 1083 | |
| 1084 | case TUNSETVNETLE: |
| 1085 | if (get_user(s, sp)) |
| 1086 | return -EFAULT; |
| 1087 | if (s) |
| 1088 | q->flags |= MACVTAP_VNET_LE; |
| 1089 | else |
| 1090 | q->flags &= ~MACVTAP_VNET_LE; |
| 1091 | return 0; |
| 1092 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1093 | case TUNSETOFFLOAD: |
| 1094 | /* let the user check for future flags */ |
| 1095 | if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | |
Vlad Yasevich | e3e3c42 | 2015-02-03 16:36:17 -0500 | [diff] [blame] | 1096 | TUN_F_TSO_ECN | TUN_F_UFO)) |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1097 | return -EINVAL; |
| 1098 | |
Vlad Yasevich | 2be5c76 | 2013-06-25 16:04:21 -0400 | [diff] [blame] | 1099 | rtnl_lock(); |
| 1100 | ret = set_offload(q, arg); |
| 1101 | rtnl_unlock(); |
| 1102 | return ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1103 | |
| 1104 | default: |
| 1105 | return -EINVAL; |
| 1106 | } |
| 1107 | } |
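
/*
 * Illustrative userspace sketch (device path and setup values are
 * assumptions): a typical consumer opens the per-interface character device
 * and configures it through the tun/tap-compatible ioctls handled above.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *	#include <linux/virtio_net.h>
 *
 *	int fd = open("/dev/tap42", O_RDWR);	(hypothetical node for one macvtap)
 *	struct ifreq ifr = { .ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR };
 *	int hdr_sz = sizeof(struct virtio_net_hdr);
 *
 *	ioctl(fd, TUNSETIFF, &ifr);		(the name is ignored, only flags)
 *	ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz);
 */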
| 1108 | |
| 1109 | #ifdef CONFIG_COMPAT |
| 1110 | static long macvtap_compat_ioctl(struct file *file, unsigned int cmd, |
| 1111 | unsigned long arg) |
| 1112 | { |
| 1113 | return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); |
| 1114 | } |
| 1115 | #endif |
| 1116 | |
| 1117 | static const struct file_operations macvtap_fops = { |
| 1118 | .owner = THIS_MODULE, |
| 1119 | .open = macvtap_open, |
| 1120 | .release = macvtap_release, |
Al Viro | 3af0bfe | 2014-11-07 14:13:53 -0500 | [diff] [blame] | 1121 | .read = new_sync_read, |
Al Viro | f5ff53b | 2014-06-19 15:36:49 -0400 | [diff] [blame] | 1122 | .write = new_sync_write, |
Al Viro | 3af0bfe | 2014-11-07 14:13:53 -0500 | [diff] [blame] | 1123 | .read_iter = macvtap_read_iter, |
Al Viro | f5ff53b | 2014-06-19 15:36:49 -0400 | [diff] [blame] | 1124 | .write_iter = macvtap_write_iter, |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1125 | .poll = macvtap_poll, |
| 1126 | .llseek = no_llseek, |
| 1127 | .unlocked_ioctl = macvtap_ioctl, |
| 1128 | #ifdef CONFIG_COMPAT |
| 1129 | .compat_ioctl = macvtap_compat_ioctl, |
| 1130 | #endif |
| 1131 | }; |
| 1132 | |
Ying Xue | 1b78414 | 2015-03-02 15:37:48 +0800 | [diff] [blame] | 1133 | static int macvtap_sendmsg(struct socket *sock, struct msghdr *m, |
| 1134 | size_t total_len) |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 1135 | { |
| 1136 | struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); |
Al Viro | c0371da | 2014-11-24 10:42:55 -0500 | [diff] [blame] | 1137 | return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 1138 | } |
| 1139 | |
Ying Xue | 1b78414 | 2015-03-02 15:37:48 +0800 | [diff] [blame] | 1140 | static int macvtap_recvmsg(struct socket *sock, struct msghdr *m, |
| 1141 | size_t total_len, int flags) |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 1142 | { |
| 1143 | struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); |
| 1144 | int ret; |
| 1145 | if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) |
| 1146 | return -EINVAL; |
Al Viro | c0371da | 2014-11-24 10:42:55 -0500 | [diff] [blame] | 1147 | ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT); |
David S. Miller | de2aa47 | 2013-12-10 22:06:18 -0500 | [diff] [blame] | 1148 | if (ret > total_len) { |
| 1149 | m->msg_flags |= MSG_TRUNC; |
| 1150 | ret = flags & MSG_TRUNC ? ret : total_len; |
| 1151 | } |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 1152 | return ret; |
| 1153 | } |
| 1154 | |
 | 1155 | /* Ops structure that mimics raw sockets, as tun does, for in-kernel users */
| 1156 | static const struct proto_ops macvtap_socket_ops = { |
| 1157 | .sendmsg = macvtap_sendmsg, |
| 1158 | .recvmsg = macvtap_recvmsg, |
| 1159 | }; |
| 1160 | |
 | 1161 | /* Get the underlying socket object from a macvtap file. Returns an error
 | 1162 | * unless the file is attached to a device. The returned object works like a
 | 1163 | * packet socket; it can be used for sock_sendmsg/sock_recvmsg. The caller is
 | 1164 | * responsible for holding a reference to the file while the socket is in use. */
| 1165 | struct socket *macvtap_get_socket(struct file *file) |
| 1166 | { |
| 1167 | struct macvtap_queue *q; |
| 1168 | if (file->f_op != &macvtap_fops) |
| 1169 | return ERR_PTR(-EINVAL); |
| 1170 | q = file->private_data; |
| 1171 | if (!q) |
| 1172 | return ERR_PTR(-EBADFD); |
| 1173 | return &q->sock; |
| 1174 | } |
| 1175 | EXPORT_SYMBOL_GPL(macvtap_get_socket); |
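
/*
 * Illustrative in-kernel caller sketch (an assumption): a consumer such as
 * vhost-net resolves a queue file descriptor to its socket and then drives
 * it through sock_sendmsg()/sock_recvmsg(), keeping the file reference for
 * as long as the socket is used.
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock;
 *
 *	if (!file)
 *		return -EBADF;
 *	sock = macvtap_get_socket(file);
 *	if (IS_ERR(sock)) {
 *		fput(file);
 *		return PTR_ERR(sock);
 *	}
 *	(... use sock, then fput(file) after the last use ...)
 */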
| 1176 | |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1177 | static int macvtap_device_event(struct notifier_block *unused, |
| 1178 | unsigned long event, void *ptr) |
| 1179 | { |
Jiri Pirko | 351638e | 2013-05-28 01:30:21 +0000 | [diff] [blame] | 1180 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1181 | struct macvlan_dev *vlan; |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1182 | struct device *classdev; |
| 1183 | dev_t devt; |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1184 | int err; |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1185 | |
| 1186 | if (dev->rtnl_link_ops != &macvtap_link_ops) |
| 1187 | return NOTIFY_DONE; |
| 1188 | |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1189 | vlan = netdev_priv(dev); |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1190 | |
| 1191 | switch (event) { |
| 1192 | case NETDEV_REGISTER: |
| 1193 | /* Create the device node here after the network device has |
| 1194 | * been registered but before register_netdevice has |
| 1195 | * finished running. |
| 1196 | */ |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1197 | err = macvtap_get_minor(vlan); |
| 1198 | if (err) |
| 1199 | return notifier_from_errno(err); |
| 1200 | |
| 1201 | devt = MKDEV(MAJOR(macvtap_major), vlan->minor); |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1202 | classdev = device_create(macvtap_class, &dev->dev, devt, |
| 1203 | dev, "tap%d", dev->ifindex); |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1204 | if (IS_ERR(classdev)) { |
| 1205 | macvtap_free_minor(vlan); |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1206 | return notifier_from_errno(PTR_ERR(classdev)); |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1207 | } |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1208 | break; |
| 1209 | case NETDEV_UNREGISTER: |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1210 | devt = MKDEV(MAJOR(macvtap_major), vlan->minor); |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1211 | device_destroy(macvtap_class, devt); |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1212 | macvtap_free_minor(vlan); |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1213 | break; |
| 1214 | } |
| 1215 | |
| 1216 | return NOTIFY_DONE; |
| 1217 | } |
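
/*
 * Sketch (assumptions: the interface name, and that udev creates the node
 * under /dev): the device created above is "tap<ifindex>", so a management
 * tool can derive the character-device path from the interface name.
 *
 *	#include <net/if.h>
 *
 *	char path[32];
 *	int fd;
 *
 *	snprintf(path, sizeof(path), "/dev/tap%u", if_nametoindex("macvtap0"));
 *	fd = open(path, O_RDWR);
 */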
| 1218 | |
| 1219 | static struct notifier_block macvtap_notifier_block __read_mostly = { |
| 1220 | .notifier_call = macvtap_device_event, |
| 1221 | }; |
| 1222 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1223 | static int macvtap_init(void) |
| 1224 | { |
| 1225 | int err; |
| 1226 | |
| 1227 | err = alloc_chrdev_region(&macvtap_major, 0, |
| 1228 | MACVTAP_NUM_DEVS, "macvtap"); |
| 1229 | if (err) |
| 1230 | goto out1; |
| 1231 | |
| 1232 | cdev_init(&macvtap_cdev, &macvtap_fops); |
| 1233 | err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS); |
| 1234 | if (err) |
| 1235 | goto out2; |
| 1236 | |
| 1237 | macvtap_class = class_create(THIS_MODULE, "macvtap"); |
| 1238 | if (IS_ERR(macvtap_class)) { |
| 1239 | err = PTR_ERR(macvtap_class); |
| 1240 | goto out3; |
| 1241 | } |
| 1242 | |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1243 | err = register_netdevice_notifier(&macvtap_notifier_block); |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1244 | if (err) |
| 1245 | goto out4; |
| 1246 | |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1247 | err = macvlan_link_register(&macvtap_link_ops); |
| 1248 | if (err) |
| 1249 | goto out5; |
| 1250 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1251 | return 0; |
| 1252 | |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1253 | out5: |
| 1254 | unregister_netdevice_notifier(&macvtap_notifier_block); |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1255 | out4: |
| 1256 | class_unregister(macvtap_class); |
| 1257 | out3: |
| 1258 | cdev_del(&macvtap_cdev); |
| 1259 | out2: |
| 1260 | unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); |
| 1261 | out1: |
| 1262 | return err; |
| 1263 | } |
| 1264 | module_init(macvtap_init); |
| 1265 | |
| 1266 | static void macvtap_exit(void) |
| 1267 | { |
| 1268 | rtnl_link_unregister(&macvtap_link_ops); |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1269 | unregister_netdevice_notifier(&macvtap_notifier_block); |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1270 | class_unregister(macvtap_class); |
| 1271 | cdev_del(&macvtap_cdev); |
| 1272 | unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); |
| 1273 | } |
| 1274 | module_exit(macvtap_exit); |
| 1275 | |
| 1276 | MODULE_ALIAS_RTNL_LINK("macvtap"); |
| 1277 | MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>"); |
| 1278 | MODULE_LICENSE("GPL"); |