#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>

/*
 * A macvtap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
35struct macvtap_queue {
36 struct sock sk;
37 struct socket sock;
Eric Dumazet43815482010-04-29 11:01:49 +000038 struct socket_wq wq;
Michael S. Tsirkin55afbd02010-04-29 13:50:48 +030039 int vnet_hdr_sz;
Eric Dumazet13707f92011-01-26 19:28:23 +000040 struct macvlan_dev __rcu *vlan;
Arnd Bergmann20d29d72010-01-30 12:24:26 +000041 struct file *file;
Arnd Bergmannb9fb9ee2010-02-18 05:48:17 +000042 unsigned int flags;
Jason Wang376b1aa2013-06-05 23:54:38 +000043 u16 queue_index;
Jason Wang815f2362013-06-05 23:54:39 +000044 bool enabled;
45 struct list_head next;
Arnd Bergmann20d29d72010-01-30 12:24:26 +000046};
47
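/*
 * Usage sketch (illustrative, not part of the driver): every open() of
 * the per-interface character device allocates one macvtap_queue, so a
 * process fans out over multiple queues simply by opening the node
 * several times; TUNSETQUEUE toggles a queue (see
 * macvtap_ioctl_set_queue() below). The "/dev/tap11" path is an
 * assumption for illustration only:
 *
 *	struct ifreq ifr = { 0 };
 *	int q0 = open("/dev/tap11", O_RDWR);	// queue 0, enabled on open
 *	int q1 = open("/dev/tap11", O_RDWR);	// queue 1 on the same tap
 *
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;	// park queue 1 ...
 *	ioctl(q1, TUNSETQUEUE, &ifr);
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;	// ... and re-enable it
 *	ioctl(q1, TUNSETQUEUE, &ifr);
 */
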
#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define MACVTAP_VNET_LE 0x80000000
#define MACVTAP_VNET_BE 0x40000000

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
{
	return q->flags & MACVTAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp)
{
	int s = !!(q->flags & MACVTAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= MACVTAP_VNET_BE;
	else
		q->flags &= ~MACVTAP_VNET_BE;

	return 0;
}
#else
static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

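/*
 * Usage sketch (illustrative, not part of the driver): userspace picks
 * the vnet header endianness through the same ioctls as tun/tap. On
 * kernels without CONFIG_TUN_VNET_CROSS_LE the TUNSETVNETBE call lands
 * in the -EINVAL stub above. The fd is assumed to be an open macvtap
 * queue:
 *
 *	int on = 1;
 *
 *	if (ioctl(fd, TUNSETVNETLE, &on) < 0)	// force little-endian hdrs
 *		perror("TUNSETVNETLE");
 *	if (ioctl(fd, TUNSETVNETBE, &on) < 0)	// cross-endian, may -EINVAL
 *		perror("TUNSETVNETBE");
 */
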
static inline bool macvtap_is_little_endian(struct macvtap_queue *q)
{
	return q->flags & MACVTAP_VNET_LE ||
		macvtap_legacy_is_little_endian(q);
}

static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(macvtap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
{
	return __cpu_to_virtio16(macvtap_is_little_endian(q), val);
}

static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof (struct macvtap_queue),
};

/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static const void *macvtap_net_namespace(struct device *d)
{
	struct net_device *dev = to_net_dev(d->parent);
	return dev_net(dev);
}

static struct class macvtap_class = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.ns_type = &net_ns_type_operations,
	.namespace = macvtap_net_namespace,
};
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled; the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

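/*
 * For reference, the reader-side pattern this implies - a sketch of
 * what the functions below actually do:
 *
 *	rcu_read_lock();
 *	vlan = rcu_dereference(q->vlan);
 *	if (vlan)
 *		... use vlan, without sleeping ...
 *	rcu_read_unlock();
 *
 * or, with the rtnl lock held instead:
 *
 *	ASSERT_RTNL();
 *	vlan = rtnl_dereference(q->vlan);
 */
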
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}

static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed; give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the rtnl lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the packet's flow hash. If there is no
 * hash, fall back to the rxq of the device on which this packet
 * arrived. If all fails, use the first queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

/*
 * The net_device is going away; give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;
}

static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			skb_queue_tail(&q->sk.sk_receive_queue, skb);
			goto wake_up;
		}

		consume_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			skb_queue_tail(&q->sk.sk_receive_queue, segs);
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		skb_queue_tail(&q->sk.sk_receive_queue, skb);
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err;

	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
	if (err)
		return err;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data);
}

static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	netdev_rx_handler_unregister(dev);
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}

static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind = "macvtap",
	.setup = macvtap_setup,
	.newlink = macvtap_newlink,
	.dellink = macvtap_dellink,
};


static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}

static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_macvtap_minor(iminor(inode));
	if (!dev)
		goto out;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto, 0);
	if (!q)
		goto out;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses macvtap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports zerocopy.
	 *
	 * Macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = macvtap_set_queue(dev, file, q);
	if (err)
		sock_put(&q->sk);

out:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table * wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
				     struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
{
	unsigned short gso_type = 0;
	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			gso_type = SKB_GSO_UDP;
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start),
					  macvtap16_to_cpu(q, vnet_hdr->csum_offset)))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size);
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}
	return 0;
}

static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
				    const struct sk_buff *skb,
				    struct virtio_net_hdr *vnet_hdr)
{
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb));
		vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size);
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (sinfo->gso_type & SKB_GSO_UDP)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		if (skb_vlan_tag_present(skb))
			vnet_hdr->csum_start = cpu_to_macvtap16(q,
				skb_checksum_start_offset(skb) + VLAN_HLEN);
		else
			vnet_hdr->csum_start = cpu_to_macvtap16(q,
				skb_checksum_start_offset(skb));
		vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset);
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */
}

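/*
 * For reference, the header that macvtap_skb_to_vnet_hdr() fills in and
 * macvtap_skb_from_vnet_hdr() parses, as defined in <linux/virtio_net.h>;
 * all __virtio16 fields pass through the macvtap16 helpers above:
 *
 *	struct virtio_net_hdr {
 *		__u8 flags;		// VIRTIO_NET_HDR_F_*
 *		__u8 gso_type;		// VIRTIO_NET_HDR_GSO_*
 *		__virtio16 hdr_len;	// hint: bytes to keep linear
 *		__virtio16 gso_size;	// GSO segment size
 *		__virtio16 csum_start;	// checksum region start
 *		__virtio16 csum_offset;	// checksum field offset
 *	};
 */
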
/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;
	ssize_t n;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
		if (n != sizeof(vnet_hdr))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    macvtap16_to_cpu(q, vnet_hdr.csum_start) +
		    macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     macvtap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_macvtap16(q,
				 macvtap16_to_cpu(q, vnet_hdr.csum_start) +
				 macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else {
		err = skb_copy_datagram_from_iter(skb, 0, from, len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg, false);
		}
	}

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr);
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}

static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;

	return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = q->vnet_hdr_sz;
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr);

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       struct iov_iter *to,
			       int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	if (!iov_iter_count(to))
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_dequeue(&q->sk.sk_receive_queue);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

	if (skb) {
		ret = macvtap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}

static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}

		if (arg & TUN_F_UFO)
			feature_mask |= NETIF_F_UFO;
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For macvtap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}

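/*
 * Usage sketch (illustrative, not part of the driver): as with tun/tap,
 * userspace declares which offloads it accepts on the read side;
 * clearing the TSO bits makes set_offload() drop GSO/LRO from the
 * macvlan so the process never sees oversized frames. The fd is an
 * assumed open macvtap queue; note the flags are passed by value:
 *
 *	unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *
 *	if (ioctl(fd, TUNSETOFFLOAD, offloads) < 0)
 *		perror("TUNSETOFFLOAD");
 */
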
/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~MACVTAP_FEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = macvtap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & MACVTAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= MACVTAP_VNET_LE;
		else
			q->flags &= ~MACVTAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return macvtap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return macvtap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		u = vlan->dev->type;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
		    put_user(u, &ifr->ifr_hwaddr.sa_family))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address(vlan->dev, &sa);
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

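/*
 * Usage sketch (illustrative, not part of the driver): a minimal
 * tun-compatible setup. The device node is per interface; its tap%d
 * name, keyed by ifindex, is created in macvtap_device_event() below,
 * and "/dev/tap11" here is an assumption for illustration:
 *
 *	struct ifreq ifr = { .ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR };
 *	int hdrsz = sizeof(struct virtio_net_hdr);
 *	int fd = open("/dev/tap11", O_RDWR);
 *
 *	ioctl(fd, TUNSETIFF, &ifr);		// flags only, name is ignored
 *	ioctl(fd, TUNSETVNETHDRSZ, &hdrsz);
 *	// read()/write() now carry a virtio_net_hdr before each frame
 */
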
static const struct file_operations macvtap_fops = {
	.owner = THIS_MODULE,
	.open = macvtap_open,
	.release = macvtap_release,
	.read_iter = macvtap_read_iter,
	.write_iter = macvtap_write_iter,
	.poll = macvtap_poll,
	.llseek = no_llseek,
	.unlocked_ioctl = macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = macvtap_compat_ioctl,
#endif
};

static int macvtap_sendmsg(struct socket *sock, struct msghdr *m,
			   size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct socket *sock, struct msghdr *m,
			   size_t total_len, int flags)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg = macvtap_sendmsg,
	.recvmsg = macvtap_recvmsg,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;
	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);

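/*
 * Usage sketch (illustrative, not part of this file): how an in-kernel
 * consumer such as vhost-net is expected to use the export. Error
 * handling is trimmed; the caller keeps the file pinned while the
 * socket is in use, exactly as the comment above demands:
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock = macvtap_get_socket(file);
 *
 *	if (IS_ERR(sock)) {
 *		fput(file);
 *		return PTR_ERR(sock);
 *	}
 *	sock_sendmsg(sock, &msg);	// or sock->ops->recvmsg(...)
 *	...
 *	fput(file);	// only once the socket is no longer used
 */
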
static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan;
	struct device *classdev;
	dev_t devt;
	int err;
	char tap_name[IFNAMSIZ];

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
	vlan = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		classdev = device_create(&macvtap_class, &dev->dev, devt,
					 dev, tap_name);
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
					tap_name);
		if (err)
			return notifier_from_errno(err);
		break;
	case NETDEV_UNREGISTER:
		/* vlan->minor == 0 if NETDEV_REGISTER above failed */
		if (vlan->minor == 0)
			break;
		sysfs_remove_link(&dev->dev.kobj, tap_name);
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		device_destroy(&macvtap_class, devt);
		macvtap_free_minor(vlan);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call = macvtap_device_event,
};

static int macvtap_init(void)
{
	int err;

	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");
	if (err)
		goto out1;

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
	if (err)
		goto out2;

	err = class_register(&macvtap_class);
	if (err)
		goto out3;

	err = register_netdevice_notifier(&macvtap_notifier_block);
	if (err)
		goto out4;

	err = macvlan_link_register(&macvtap_link_ops);
	if (err)
		goto out5;

	return 0;

out5:
	unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
	class_unregister(&macvtap_class);
out3:
	cdev_del(&macvtap_cdev);
out2:
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
	return err;
}
module_init(macvtap_init);

static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(&macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
	idr_destroy(&minor_idr);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");